"...resnet50_tensorflow.git" did not exist on "6c874e17fc7aaf244e50665facb834a38988081b"
Commit a5056cfb authored by comfyanonymous

Remove useless code.

parent b12b48e1
@@ -104,9 +104,7 @@ def attention_basic(q, k, v, heads, mask=None):
     # force cast to fp32 to avoid overflowing
     if _ATTN_PRECISION =="fp32":
-        with torch.autocast(enabled=False, device_type = 'cuda'):
-            q, k = q.float(), k.float()
-            sim = einsum('b i d, b j d -> b i j', q, k) * scale
+        sim = einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale
     else:
         sim = einsum('b i d, b j d -> b i j', q, k) * scale
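
The hunk above collapses the old three-line body into a single expression: casting q and k inline with .float() yields the same fp32 similarity matrix as casting them first inside the removed torch.autocast(enabled=False, ...) wrapper, which only has an effect inside an active autocast region. A minimal sketch checking that the two bodies agree outside autocast (shapes and values are illustrative, not taken from the repository):

import torch
from torch import einsum

b, n, d = 2, 8, 64
scale = d ** -0.5
q = torch.randn(b, n, d, dtype=torch.float16)
k = torch.randn(b, n, d, dtype=torch.float16)

# Old body, minus the autocast wrapper (a no-op when no autocast
# region is active): cast first, then compute.
q32, k32 = q.float(), k.float()
sim_old = einsum('b i d, b j d -> b i j', q32, k32) * scale

# New body kept by the commit: cast inline.
sim_new = einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale

assert sim_old.dtype == sim_new.dtype == torch.float32
assert torch.equal(sim_old, sim_new)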
@@ -27,9 +27,7 @@ def attention_basic_with_sim(q, k, v, heads, mask=None):
     # force cast to fp32 to avoid overflowing
     if _ATTN_PRECISION =="fp32":
-        with torch.autocast(enabled=False, device_type = 'cuda'):
-            q, k = q.float(), k.float()
-            sim = einsum('b i d, b j d -> b i j', q, k) * scale
+        sim = einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale
     else:
         sim = einsum('b i d, b j d -> b i j', q, k) * scale
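
For context on the comment both hunks keep ("force cast to fp32 to avoid overflowing"): fp16 tops out at 65504, so the q·k dot products in attention can easily overflow to inf at fp16, while fp32 represents them exactly. A small illustration, with values chosen to force the overflow rather than taken from the repository:

import torch

d = 256
q = torch.full((d,), 200.0, dtype=torch.float16)
k = torch.full((d,), 200.0, dtype=torch.float16)

# Each elementwise product is 40000 (still representable), but the
# reduction 256 * 40000 = 10240000 exceeds fp16's max of 65504.
print((q * k).sum())                  # tensor(inf, dtype=torch.float16)

# Casting to fp32 first keeps the result exact.
print((q.float() * k.float()).sum())  # tensor(10240000.)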