import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from typing import Optional, Any
import logging

from .diffusionmodules.util import AlphaBlender, timestep_embedding
from .sub_quadratic_attention import efficient_dot_product_attention

from comfy import model_management

if model_management.xformers_enabled():
    import xformers
    import xformers.ops

from comfy.cli_args import args
import comfy.ops
ops = comfy.ops.disable_weight_init


def get_attn_precision(attn_precision):
    if args.dont_upcast_attention:
        return None
    if attn_precision is None and args.force_upcast_attention:
        return torch.float32
    return attn_precision

def exists(val):
    return val is not None


def uniq(arr):
    return {el: True for el in arr}.keys()


def default(val, d):
    if exists(val):
        return val
    return d


def max_neg_value(t):
    return -torch.finfo(t.dtype).max


def init_(tensor):
    dim = tensor.shape[-1]
    std = 1 / math.sqrt(dim)
    tensor.uniform_(-std, std)
    return tensor


# feedforward
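# GEGLU projects the input to 2 * dim_out features and uses one half, passed through GELU,
# to gate the other half.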
class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out, dtype=None, device=None, operations=ops):
        super().__init__()
        self.proj = operations.Linear(dim_in, dim_out * 2, dtype=dtype, device=device)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, device=None, operations=ops):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = nn.Sequential(
            operations.Linear(dim, inner_dim, dtype=dtype, device=device),
            nn.GELU()
        ) if not glu else GEGLU(dim, inner_dim, dtype=dtype, device=device, operations=operations)

        self.net = nn.Sequential(
            project_in,
            nn.Dropout(dropout),
            operations.Linear(inner_dim, dim_out, dtype=dtype, device=device)
        )

    def forward(self, x):
        return self.net(x)

def Normalize(in_channels, dtype=None, device=None):
    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)
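
# The attention_* implementations below share one contract: q, k and v arrive as
# (batch, tokens, heads * dim_head) tensors, an optional attention mask / additive bias may be
# supplied, and the result is returned as (batch, q_tokens, heads * dim_head).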

def attention_basic(q, k, v, heads, mask=None, attn_precision=None):
    attn_precision = get_attn_precision(attn_precision)

    b, _, dim_head = q.shape
    dim_head //= heads
    scale = dim_head ** -0.5

    h = heads
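    # fold the heads into the batch dimension: (b, tokens, heads * dim_head) -> (b * heads, tokens, dim_head)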
    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

    # force cast to fp32 to avoid overflowing
    if attn_precision == torch.float32:
        sim = einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale
    else:
        sim = einsum('b i d, b j d -> b i j', q, k) * scale

    del q, k

    if exists(mask):
        if mask.dtype == torch.bool:
            mask = rearrange(mask, 'b ... -> b (...)') #TODO: check if this bool part matches pytorch attention
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)
        else:
            if len(mask.shape) == 2:
                bs = 1
            else:
                bs = mask.shape[0]
            mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1]).expand(b, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])
            sim.add_(mask)

    # attention, what we cannot get enough of
    sim = sim.softmax(dim=-1)

    out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v)
    out = (
        out.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return out


def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None):
    attn_precision = get_attn_precision(attn_precision)

    b, _, dim_head = query.shape
    dim_head //= heads

    scale = dim_head ** -0.5
    query = query.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)
    value = value.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)

    key = key.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 3, 1).reshape(b * heads, dim_head, -1)

    dtype = query.dtype
    upcast_attention = attn_precision == torch.float32 and query.dtype != torch.float32
    if upcast_attention:
        bytes_per_token = torch.finfo(torch.float32).bits//8
    else:
        bytes_per_token = torch.finfo(query.dtype).bits//8
    batch_x_heads, q_tokens, _ = query.shape
    _, _, k_tokens = key.shape
    qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens

    mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True)

    kv_chunk_size_min = None
    kv_chunk_size = None
    query_chunk_size = None
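    # Heuristic: pick the largest query chunk size (4096 down to 256) whose q @ k.T slice fits in
    # the currently free memory with a 4x margin; in that case the keys/values are not chunked at
    # all. If nothing fits, fall back to 512-token query chunks and let the kv chunk size be
    # picked automatically by efficient_dot_product_attention.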

    for x in [4096, 2048, 1024, 512, 256]:
        count = mem_free_total / (batch_x_heads * bytes_per_token * x * 4.0)
        if count >= k_tokens:
            kv_chunk_size = k_tokens
            query_chunk_size = x
            break

    if query_chunk_size is None:
        query_chunk_size = 512

    if mask is not None:
        if len(mask.shape) == 2:
            bs = 1
        else:
            bs = mask.shape[0]
        mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1]).expand(b, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])

    hidden_states = efficient_dot_product_attention(
        query,
        key,
        value,
        query_chunk_size=query_chunk_size,
        kv_chunk_size=kv_chunk_size,
        kv_chunk_size_min=kv_chunk_size_min,
        use_checkpoint=False,
        upcast_attention=upcast_attention,
        mask=mask,
    )

    hidden_states = hidden_states.to(dtype)

    hidden_states = hidden_states.unflatten(0, (-1, heads)).transpose(1,2).flatten(start_dim=2)
    return hidden_states

def attention_split(q, k, v, heads, mask=None, attn_precision=None):
    attn_precision = get_attn_precision(attn_precision)

    b, _, dim_head = q.shape
    dim_head //= heads
    scale = dim_head ** -0.5

    h = heads
    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

    r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)

    mem_free_total = model_management.get_free_memory(q.device)

    if attn_precision == torch.float32:
        element_size = 4
        upcast = True
    else:
        element_size = q.element_size()
        upcast = False

    gb = 1024 ** 3
    tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * element_size
    modifier = 3
    mem_required = tensor_size * modifier
    steps = 1


    if mem_required > mem_free_total:
        steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2)))
        # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
        #      f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")

    if steps > 64:
        max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
        raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
                            f'Need: {mem_required/64/gb:0.1f}GB free, Have: {mem_free_total/gb:0.1f}GB free')

    if mask is not None:
        if len(mask.shape) == 2:
            bs = 1
        else:
            bs = mask.shape[0]
        mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1]).expand(b, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])

    # print("steps", steps, mem_required, mem_free_total, modifier, q.element_size(), tensor_size)
    first_op_done = False
    cleared_cache = False
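    # Compute the attention in `steps` slices over the query tokens. On OOM before any slice has
    # finished, empty the cache and retry once, then keep doubling the slice count (up to 64)
    # before giving up; an OOM after partial progress is re-raised immediately.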
    while True:
        try:
            slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
            for i in range(0, q.shape[1], slice_size):
                end = i + slice_size
                if upcast:
                    with torch.autocast(enabled=False, device_type='cuda'):
                        s1 = einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * scale
                else:
                    s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale

                if mask is not None:
                    if len(mask.shape) == 2:
                        s1 += mask[i:end]
                    else:
                        s1 += mask[:, i:end]

                s2 = s1.softmax(dim=-1).to(v.dtype)
                del s1
                first_op_done = True

                r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
                del s2
            break
        except model_management.OOM_EXCEPTION as e:
            if not first_op_done:
                model_management.soft_empty_cache(True)
                if not cleared_cache:
                    cleared_cache = True
                    logging.warning("out of memory error, emptying cache and trying again")
                    continue
                steps *= 2
                if steps > 64:
                    raise e
                logging.warning("out of memory error, increasing steps and trying again {}".format(steps))
            else:
                raise e

    del q, k, v

    r1 = (
        r1.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return r1

BROKEN_XFORMERS = False
try:
    x_vers = xformers.__version__
    # XFormers bug confirmed on all versions from 0.0.21 to 0.0.26 (q with bs bigger than 65535 gives CUDA error)
    BROKEN_XFORMERS = x_vers.startswith("0.0.2") and not x_vers.startswith("0.0.20")
except Exception:
    pass

def attention_xformers(q, k, v, heads, mask=None, attn_precision=None):
    b, _, dim_head = q.shape
    dim_head //= heads

    disabled_xformers = False
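    # Fall back to the PyTorch SDPA implementation when this xformers build is known to fail for
    # batch * heads > 65535, or under torch.jit tracing/scripting.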

    if BROKEN_XFORMERS:
        if b * heads > 65535:
            disabled_xformers = True

    if not disabled_xformers:
        if torch.jit.is_tracing() or torch.jit.is_scripting():
            disabled_xformers = True

    if disabled_xformers:
        return attention_pytorch(q, k, v, heads, mask)

    q, k, v = map(
        lambda t: t.reshape(b, -1, heads, dim_head),
        (q, k, v),
    )

    if mask is not None:
        pad = 8 - q.shape[1] % 8
        mask_out = torch.empty([q.shape[0], q.shape[1], q.shape[1] + pad], dtype=q.dtype, device=q.device)
        mask_out[:, :, :mask.shape[-1]] = mask
        mask = mask_out[:, :, :mask.shape[-1]]

    out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=mask)

    out = (
        out.reshape(b, -1, heads * dim_head)
    )
    return out

def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None):
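    # reshape to (b, heads, tokens, dim_head) and let scaled_dot_product_attention pick the best
    # available fused kernel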
    b, _, dim_head = q.shape
    dim_head //= heads
    q, k, v = map(
        lambda t: t.view(b, -1, heads, dim_head).transpose(1, 2),
        (q, k, v),
    )

    out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
    out = (
        out.transpose(1, 2).reshape(b, -1, heads * dim_head)
    )
    return out


optimized_attention = attention_basic
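# Chosen once at import time: xformers if enabled, else PyTorch SDPA, else the split or
# sub-quadratic implementations depending on the CLI flags.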

if model_management.xformers_enabled():
    logging.info("Using xformers cross attention")
    optimized_attention = attention_xformers
elif model_management.pytorch_attention_enabled():
    logging.info("Using pytorch cross attention")
    optimized_attention = attention_pytorch
else:
    if args.use_split_cross_attention:
        logging.info("Using split optimization for cross attention")
        optimized_attention = attention_split
    else:
        logging.info("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
        optimized_attention = attention_sub_quad

optimized_attention_masked = optimized_attention
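# optimized_attention_masked is what gets called when an attention mask is supplied; for now it
# simply aliases optimized_attention.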

def optimized_attention_for_device(device, mask=False, small_input=False):
    if small_input:
        if model_management.pytorch_attention_enabled():
            return attention_pytorch #TODO: need to confirm but this is probably slightly faster for small inputs in all cases
        else:
            return attention_basic

    if device == torch.device("cpu"):
        return attention_sub_quad

    if mask:
        return optimized_attention_masked

    return optimized_attention


class CrossAttention(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., attn_precision=None, dtype=None, device=None, operations=ops):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)
        self.attn_precision = attn_precision

        self.heads = heads
        self.dim_head = dim_head

        self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout))

    def forward(self, x, context=None, value=None, mask=None):
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
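        # a separate value tensor may be passed in (e.g. by attention patches); otherwise v is
        # computed from the same context as k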
        if value is not None:
            v = self.to_v(value)
            del value
        else:
            v = self.to_v(context)

        if mask is None:
            out = optimized_attention(q, k, v, self.heads, attn_precision=self.attn_precision)
        else:
            out = optimized_attention_masked(q, k, v, self.heads, mask, attn_precision=self.attn_precision)
        return self.to_out(out)


class BasicTransformerBlock(nn.Module):
    def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, ff_in=False, inner_dim=None,
                 disable_self_attn=False, disable_temporal_crossattention=False, switch_temporal_ca_to_sa=False, attn_precision=None, dtype=None, device=None, operations=ops):
        super().__init__()

        self.ff_in = ff_in or inner_dim is not None
        if inner_dim is None:
            inner_dim = dim

        self.is_res = inner_dim == dim
        self.attn_precision = attn_precision

        if self.ff_in:
            self.norm_in = operations.LayerNorm(dim, dtype=dtype, device=device)
            self.ff_in = FeedForward(dim, dim_out=inner_dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations)

        self.disable_self_attn = disable_self_attn
        self.attn1 = CrossAttention(query_dim=inner_dim, heads=n_heads, dim_head=d_head, dropout=dropout,
                              context_dim=context_dim if self.disable_self_attn else None, attn_precision=self.attn_precision, dtype=dtype, device=device, operations=operations)  # is a self-attention if not self.disable_self_attn
        self.ff = FeedForward(inner_dim, dim_out=dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations)

        if disable_temporal_crossattention:
            if switch_temporal_ca_to_sa:
                raise ValueError
            else:
                self.attn2 = None
        else:
            context_dim_attn2 = None
            if not switch_temporal_ca_to_sa:
                context_dim_attn2 = context_dim

            self.attn2 = CrossAttention(query_dim=inner_dim, context_dim=context_dim_attn2,
                                heads=n_heads, dim_head=d_head, dropout=dropout, attn_precision=self.attn_precision, dtype=dtype, device=device, operations=operations)  # is self-attn if context is none
            self.norm2 = operations.LayerNorm(inner_dim, dtype=dtype, device=device)

        self.norm1 = operations.LayerNorm(inner_dim, dtype=dtype, device=device)
        self.norm3 = operations.LayerNorm(inner_dim, dtype=dtype, device=device)
        self.n_heads = n_heads
        self.d_head = d_head
        self.switch_temporal_ca_to_sa = switch_temporal_ca_to_sa

    def forward(self, x, context=None, transformer_options={}):
        extra_options = {}
        block = transformer_options.get("block", None)
        block_index = transformer_options.get("block_index", 0)
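        # split transformer_options into the patch dictionaries; every other key is passed through
        # to the patches via extra_options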
        transformer_patches = {}
        transformer_patches_replace = {}

        for k in transformer_options:
            if k == "patches":
                transformer_patches = transformer_options[k]
            elif k == "patches_replace":
                transformer_patches_replace = transformer_options[k]
            else:
                extra_options[k] = transformer_options[k]

        extra_options["n_heads"] = self.n_heads
        extra_options["dim_head"] = self.d_head
        extra_options["attn_precision"] = self.attn_precision

        if self.ff_in:
            x_skip = x
            x = self.ff_in(self.norm_in(x))
            if self.is_res:
                x += x_skip

        n = self.norm1(x)
        if self.disable_self_attn:
            context_attn1 = context
        else:
            context_attn1 = None
        value_attn1 = None

        if "attn1_patch" in transformer_patches:
            patch = transformer_patches["attn1_patch"]
            if context_attn1 is None:
                context_attn1 = n
            value_attn1 = context_attn1
            for p in patch:
                n, context_attn1, value_attn1 = p(n, context_attn1, value_attn1, extra_options)

        if block is not None:
            transformer_block = (block[0], block[1], block_index)
        else:
            transformer_block = None
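
        # "patches_replace" entries replace the whole attention computation: the q/k/v and output
        # projections are still applied here, and the patch receives the already projected tensors.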
        attn1_replace_patch = transformer_patches_replace.get("attn1", {})
        block_attn1 = transformer_block
        if block_attn1 not in attn1_replace_patch:
            block_attn1 = block

        if block_attn1 in attn1_replace_patch:
            if context_attn1 is None:
                context_attn1 = n
                value_attn1 = n
            n = self.attn1.to_q(n)
            context_attn1 = self.attn1.to_k(context_attn1)
            value_attn1 = self.attn1.to_v(value_attn1)
            n = attn1_replace_patch[block_attn1](n, context_attn1, value_attn1, extra_options)
            n = self.attn1.to_out(n)
        else:
            n = self.attn1(n, context=context_attn1, value=value_attn1)

        if "attn1_output_patch" in transformer_patches:
            patch = transformer_patches["attn1_output_patch"]
            for p in patch:
                n = p(n, extra_options)

        x += n
        if "middle_patch" in transformer_patches:
            patch = transformer_patches["middle_patch"]
            for p in patch:
                x = p(x, extra_options)

        if self.attn2 is not None:
            n = self.norm2(x)
            if self.switch_temporal_ca_to_sa:
                context_attn2 = n
            else:
                context_attn2 = context
            value_attn2 = None
            if "attn2_patch" in transformer_patches:
                patch = transformer_patches["attn2_patch"]
                value_attn2 = context_attn2
                for p in patch:
                    n, context_attn2, value_attn2 = p(n, context_attn2, value_attn2, extra_options)

            attn2_replace_patch = transformer_patches_replace.get("attn2", {})
            block_attn2 = transformer_block
            if block_attn2 not in attn2_replace_patch:
                block_attn2 = block

            if block_attn2 in attn2_replace_patch:
                if value_attn2 is None:
                    value_attn2 = context_attn2
                n = self.attn2.to_q(n)
                context_attn2 = self.attn2.to_k(context_attn2)
                value_attn2 = self.attn2.to_v(value_attn2)
                n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options)
                n = self.attn2.to_out(n)
            else:
                n = self.attn2(n, context=context_attn2, value=value_attn2)

        if "attn2_output_patch" in transformer_patches:
            patch = transformer_patches["attn2_output_patch"]
            for p in patch:
                n = p(n, extra_options)

        x += n
        if self.is_res:
            x_skip = x
        x = self.ff(self.norm3(x))
        if self.is_res:
            x += x_skip

        return x


class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, project the input (aka embedding)
    and reshape to b, t, d.
    Then apply standard transformer action.
    Finally, reshape to image
    NEW: use_linear for more efficiency instead of the 1x1 convs
    """
    def __init__(self, in_channels, n_heads, d_head,
                 depth=1, dropout=0., context_dim=None,
                 disable_self_attn=False, use_linear=False,
                 use_checkpoint=True, attn_precision=None, dtype=None, device=None, operations=ops):
        super().__init__()
        if exists(context_dim) and not isinstance(context_dim, list):
            context_dim = [context_dim] * depth
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = operations.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)
        if not use_linear:
            self.proj_in = operations.Conv2d(in_channels,
                                     inner_dim,
                                     kernel_size=1,
                                     stride=1,
                                     padding=0, dtype=dtype, device=device)
        else:
            self.proj_in = operations.Linear(in_channels, inner_dim, dtype=dtype, device=device)

        self.transformer_blocks = nn.ModuleList(
            [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
                                   disable_self_attn=disable_self_attn, checkpoint=use_checkpoint, attn_precision=attn_precision, dtype=dtype, device=device, operations=operations)
                for d in range(depth)]
        )
        if not use_linear:
            self.proj_out = operations.Conv2d(inner_dim, in_channels,
                                                  kernel_size=1,
                                                  stride=1,
                                                  padding=0, dtype=dtype, device=device)
        else:
            self.proj_out = operations.Linear(in_channels, inner_dim, dtype=dtype, device=device)
        self.use_linear = use_linear

    def forward(self, x, context=None, transformer_options={}):
        # note: if no context is given, cross-attention defaults to self-attention
        if not isinstance(context, list):
            context = [context] * len(self.transformer_blocks)
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
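        # (b, c, h, w) -> (b, h * w, c)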
        x = x.movedim(1, 3).flatten(1, 2).contiguous()
        if self.use_linear:
            x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            transformer_options["block_index"] = i
            x = block(x, context=context[i], transformer_options=transformer_options)
        if self.use_linear:
            x = self.proj_out(x)
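        # (b, h * w, c) -> (b, c, h, w)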
        x = x.reshape(x.shape[0], h, w, x.shape[-1]).movedim(3, 1).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        return x + x_in


class SpatialVideoTransformer(SpatialTransformer):
    def __init__(
        self,
        in_channels,
        n_heads,
        d_head,
        depth=1,
        dropout=0.0,
        use_linear=False,
        context_dim=None,
        use_spatial_context=False,
        timesteps=None,
        merge_strategy: str = "fixed",
        merge_factor: float = 0.5,
        time_context_dim=None,
        ff_in=False,
        checkpoint=False,
        time_depth=1,
        disable_self_attn=False,
        disable_temporal_crossattention=False,
        max_time_embed_period: int = 10000,
        attn_precision=None,
        dtype=None, device=None, operations=ops
    ):
        super().__init__(
            in_channels,
            n_heads,
            d_head,
            depth=depth,
            dropout=dropout,
            use_checkpoint=checkpoint,
            context_dim=context_dim,
            use_linear=use_linear,
            disable_self_attn=disable_self_attn,
            attn_precision=attn_precision,
            dtype=dtype, device=device, operations=operations
        )
        self.time_depth = time_depth
        self.depth = depth
        self.max_time_embed_period = max_time_embed_period

        time_mix_d_head = d_head
        n_time_mix_heads = n_heads

        time_mix_inner_dim = int(time_mix_d_head * n_time_mix_heads)

        inner_dim = n_heads * d_head
        if use_spatial_context:
            time_context_dim = context_dim

        self.time_stack = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    n_time_mix_heads,
                    time_mix_d_head,
                    dropout=dropout,
                    context_dim=time_context_dim,
                    # timesteps=timesteps,
                    checkpoint=checkpoint,
                    ff_in=ff_in,
                    inner_dim=time_mix_inner_dim,
                    disable_self_attn=disable_self_attn,
                    disable_temporal_crossattention=disable_temporal_crossattention,
                    attn_precision=attn_precision,
                    dtype=dtype, device=device, operations=operations
                )
                for _ in range(self.depth)
            ]
        )

        assert len(self.time_stack) == len(self.transformer_blocks)

        self.use_spatial_context = use_spatial_context
        self.in_channels = in_channels

        time_embed_dim = self.in_channels * 4
        self.time_pos_embed = nn.Sequential(
            operations.Linear(self.in_channels, time_embed_dim, dtype=dtype, device=device),
            nn.SiLU(),
            operations.Linear(time_embed_dim, self.in_channels, dtype=dtype, device=device),
        )

        self.time_mixer = AlphaBlender(
            alpha=merge_factor, merge_strategy=merge_strategy
        )

    def forward(
        self,
        x: torch.Tensor,
        context: Optional[torch.Tensor] = None,
        time_context: Optional[torch.Tensor] = None,
        timesteps: Optional[int] = None,
        image_only_indicator: Optional[torch.Tensor] = None,
        transformer_options={}
    ) -> torch.Tensor:
        _, _, h, w = x.shape
        x_in = x
        spatial_context = None
        if exists(context):
            spatial_context = context

        if self.use_spatial_context:
            assert (
                context.ndim == 3
            ), f"n dims of spatial context should be 3 but are {context.ndim}"

            if time_context is None:
                time_context = context
            time_context_first_timestep = time_context[::timesteps]
            time_context = repeat(
                time_context_first_timestep, "b ... -> (b n) ...", n=h * w
            )
        elif time_context is not None and not self.use_spatial_context:
            time_context = repeat(time_context, "b ... -> (b n) ...", n=h * w)
            if time_context.ndim == 2:
                time_context = rearrange(time_context, "b c -> b 1 c")

        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, "b c h w -> b (h w) c")
        if self.use_linear:
            x = self.proj_in(x)

        num_frames = torch.arange(timesteps, device=x.device)
        num_frames = repeat(num_frames, "t -> b t", b=x.shape[0] // timesteps)
        num_frames = rearrange(num_frames, "b t -> (b t)")
        t_emb = timestep_embedding(num_frames, self.in_channels, repeat_only=False, max_period=self.max_time_embed_period).to(x.dtype)
        emb = self.time_pos_embed(t_emb)
        emb = emb[:, None, :]
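        # per-frame time embedding of shape (b * t, 1, channels), broadcast onto every spatial token below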

        for it_, (block, mix_block) in enumerate(
            zip(self.transformer_blocks, self.time_stack)
        ):
            transformer_options["block_index"] = it_
            x = block(
                x,
                context=spatial_context,
                transformer_options=transformer_options,
            )

            x_mix = x
            x_mix = x_mix + emb

            B, S, C = x_mix.shape
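            # fold the spatial tokens into the batch so the temporal block attends across frames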
            x_mix = rearrange(x_mix, "(b t) s c -> (b s) t c", t=timesteps)
            x_mix = mix_block(x_mix, context=time_context) #TODO: transformer_options
            x_mix = rearrange(
                x_mix, "(b s) t c -> (b t) s c", s=S, b=B // timesteps, c=C, t=timesteps
            )

            x = self.time_mixer(x_spatial=x, x_temporal=x_mix, image_only_indicator=image_only_indicator)

        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w)
        if not self.use_linear:
            x = self.proj_out(x)
        out = x + x_in
        return out