"docs/references/multi_node_deployment/deploy_on_k8s.md" did not exist on "e8e18dcdcca0e6d4eacccd074bea9da2ad6a3e18"
Commit c9daec4c authored by comfyanonymous

Remove prints that are useless when xformers is enabled.

parent a7328e49
@@ -343,7 +343,7 @@ class CrossAttentionDoggettx(nn.Module):
         return self.to_out(r2)
 
 
-class OriginalCrossAttention(nn.Module):
+class CrossAttention(nn.Module):
     def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
         super().__init__()
         inner_dim = dim_head * heads
@@ -395,14 +395,13 @@ class OriginalCrossAttention(nn.Module):
         return self.to_out(out)
 
 import sys
-if "--use-split-cross-attention" in sys.argv:
-    print("Using split optimization for cross attention")
-    class CrossAttention(CrossAttentionDoggettx):
-        pass
-else:
-    print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
-    class CrossAttention(CrossAttentionBirchSan):
-        pass
+if XFORMERS_IS_AVAILBLE == False:
+    if "--use-split-cross-attention" in sys.argv:
+        print("Using split optimization for cross attention")
+        CrossAttention = CrossAttentionDoggettx
+    else:
+        print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
+        CrossAttention = CrossAttentionBirchSan
 
 class MemoryEfficientCrossAttention(nn.Module):
     # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
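For context, a minimal, self-contained sketch of how the selection reads after this commit. The try/except availability probe and the stub classes are assumptions made only so the sketch runs on its own; the real file defines full attention implementations and sets XFORMERS_IS_AVAILBLE near its imports.

import sys

# Assumed availability probe; the actual file determines XFORMERS_IS_AVAILBLE itself.
try:
    import xformers
    import xformers.ops
    XFORMERS_IS_AVAILBLE = True
except ImportError:
    XFORMERS_IS_AVAILBLE = False

class CrossAttention:          # stand-in for the default implementation
    pass

class CrossAttentionDoggettx:  # stand-in for the split-attention variant
    pass

class CrossAttentionBirchSan:  # stand-in for the sub-quadratic variant
    pass

# After this commit the messages (and the rebinding of CrossAttention) only run
# when xformers is unavailable; with xformers enabled these implementations are
# not the ones used, so the hints would be noise.
if XFORMERS_IS_AVAILBLE == False:
    if "--use-split-cross-attention" in sys.argv:
        print("Using split optimization for cross attention")
        CrossAttention = CrossAttentionDoggettx
    else:
        print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
        CrossAttention = CrossAttentionBirchSan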