"examples/vscode:/vscode.git/clone" did not exist on "d94c6b01445531a649f61b2faebfa767d8b1915f"
Commit 98f828fa authored by comfyanonymous

Remove unnecessary code.

parent 1c4af591
@@ -318,11 +318,7 @@ def attention_xformers(q, k, v, heads, mask=None, attn_precision=None):
         return attention_pytorch(q, k, v, heads, mask)
     q, k, v = map(
-        lambda t: t.unsqueeze(3)
-        .reshape(b, -1, heads, dim_head)
-        .permute(0, 2, 1, 3)
-        .reshape(b * heads, -1, dim_head)
-        .contiguous(),
+        lambda t: t.reshape(b, -1, heads, dim_head),
         (q, k, v),
     )
@@ -335,10 +331,7 @@ def attention_xformers(q, k, v, heads, mask=None, attn_precision=None):
     out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=mask)
     out = (
-        out.unsqueeze(0)
-        .reshape(b, heads, -1, dim_head)
-        .permute(0, 2, 1, 3)
-        .reshape(b, -1, heads * dim_head)
+        out.reshape(b, -1, heads * dim_head)
     )
     return out
@@ -3,7 +3,6 @@ import math
 import torch
 import torch.nn as nn
 import numpy as np
-from einops import rearrange
 from typing import Optional, Any
 import logging
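
Note (added for context, not part of the commit): xformers.ops.memory_efficient_attention accepts query/key/value tensors in the 4-D [batch, seq_len, heads, dim_head] layout, so the permute into a flattened [batch * heads, seq_len, dim_head] layout on the way in, and the matching un-permute of the output, appear to be the "unnecessary code" this commit removes. Below is a minimal sketch of the layout equivalence; the shapes are illustrative only and not taken from the repository.

import torch

# Illustrative sizes, chosen only for this sketch.
b, seq_len, heads, dim_head = 2, 16, 8, 64
t = torch.randn(b, seq_len, heads * dim_head)

# Old path: fold the heads into the batch dimension -> (b * heads, seq_len, dim_head).
old_layout = (
    t.unsqueeze(3)
    .reshape(b, -1, heads, dim_head)
    .permute(0, 2, 1, 3)
    .reshape(b * heads, -1, dim_head)
    .contiguous()
)

# New path: keep heads as their own axis -> (b, seq_len, heads, dim_head),
# a layout memory_efficient_attention can consume directly.
new_layout = t.reshape(b, -1, heads, dim_head)

# Both layouts hold the same values, just arranged differently.
assert torch.equal(
    old_layout,
    new_layout.permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head),
)
print("old:", tuple(old_layout.shape), "new:", tuple(new_layout.shape))

By the same reasoning, the output of the 4-D call can be collapsed back to (b, seq_len, heads * dim_head) with a single reshape, which is what the second hunk does.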