chenpangpang / ComfyUI · Commits · c6951548

Commit c6951548
Authored Jan 07, 2024 by comfyanonymous
Parent: aaa90173

Update optimized_attention_for_device function for new functions that support masked attention.
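For orientation only: "masked attention" here means the attention kernels accept a mask argument that suppresses selected key positions. The toy below is plain PyTorch, not code from this commit, and just illustrates the idea with an additive mask and made-up shapes:

import torch
import torch.nn.functional as F

# Toy masked attention: an additive mask with large negative entries
# effectively removes the chosen key positions from the softmax.
q = torch.randn(1, 8, 4, 16)   # (batch, heads, queries, dim_head)
k = torch.randn(1, 8, 4, 16)
v = torch.randn(1, 8, 4, 16)

mask = torch.zeros(4, 4)
mask[:, 2:] = -10000.0         # block attention to the last two key positions

out = F.scaled_dot_product_attention(q, k, v, attn_mask=mask)
print(out.shape)               # torch.Size([1, 8, 4, 16])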
Showing 2 changed files with 9 additions and 10 deletions
comfy/clip_model.py              +1 -1
comfy/ldm/modules/attention.py   +8 -9
comfy/clip_model.py
@@ -57,7 +57,7 @@ class CLIPEncoder(torch.nn.Module):
         self.layers = torch.nn.ModuleList([CLIPLayer(embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations) for i in range(num_layers)])
 
     def forward(self, x, mask=None, intermediate_output=None):
-        optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None)
+        optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None, small_input=True)
 
         if intermediate_output is not None:
             if intermediate_output < 0:
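The forward change above makes the encoder request a kernel that matches its situation: small_input=True (CLIP token sequences are short) and mask=... only when a mask tensor was actually passed. A rough sketch of that calling pattern outside the class, assuming a ComfyUI checkout on the path; shapes and the head count are made up for illustration:

import torch
from comfy.ldm.modules.attention import optimized_attention_for_device

# Hypothetical CLIP-like shapes: batch 2, 77 tokens, width 768, 12 heads.
x = torch.randn(2, 77, 768)
mask = None  # or an attention-bias tensor when padding tokens must be ignored

# Same call the patched CLIPEncoder.forward makes: pick a kernel for this device,
# masked only if a mask exists, and favoring the small-input path.
optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None, small_input=True)

# All kernels share the (q, k, v, heads, mask=None) signature from attention.py,
# so self-attention over x is invoked roughly like this:
out = optimized_attention(x, x, x, 12, mask=mask)
print(out.shape)  # expected to keep the (batch, tokens, width) layout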
comfy/ldm/modules/attention.py
@@ -333,7 +333,6 @@ def attention_pytorch(q, k, v, heads, mask=None):
 
 optimized_attention = attention_basic
-optimized_attention_masked = attention_basic
 
 if model_management.xformers_enabled():
     print("Using xformers cross attention")
@@ -349,15 +348,15 @@ else:
         print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
         optimized_attention = attention_sub_quad
 
-if model_management.pytorch_attention_enabled():
-    optimized_attention_masked = attention_pytorch
+optimized_attention_masked = optimized_attention
 
-def optimized_attention_for_device(device, mask=False):
-    if device == torch.device("cpu"): #TODO
-        if model_management.pytorch_attention_enabled():
-            return attention_pytorch
-        else:
-            return attention_basic
+def optimized_attention_for_device(device, mask=False, small_input=False):
+    if small_input and model_management.pytorch_attention_enabled():
+        return attention_pytorch #TODO: need to confirm but this is probably slightly faster for small inputs in all cases
+
+    if device == torch.device("cpu"):
+        return attention_sub_quad
 
     if mask:
         return optimized_attention_masked
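Read on its own, the new helper resolves in a fixed priority order: the PyTorch kernel for small inputs (when enabled), the sub-quadratic kernel on CPU, the masked alias when a mask will be supplied, and otherwise the globally chosen kernel. A self-contained toy rendering of that order, with placeholder kernels standing in for the real attention functions, just to make the dispatch visible:

import torch

def make_kernel(name):
    # Stand-in for attention_pytorch / attention_sub_quad / etc.
    def kernel(q, k, v, heads, mask=None):
        return name
    return kernel

attention_pytorch = make_kernel("pytorch")
attention_sub_quad = make_kernel("sub_quad")
optimized_attention = make_kernel("default")
optimized_attention_masked = optimized_attention  # as in the commit, the masked alias now tracks the default

pytorch_attention_enabled = True  # stand-in for model_management.pytorch_attention_enabled()

def optimized_attention_for_device(device, mask=False, small_input=False):
    # Mirrors the priority order of the committed function.
    if small_input and pytorch_attention_enabled:
        return attention_pytorch
    if device == torch.device("cpu"):
        return attention_sub_quad
    if mask:
        return optimized_attention_masked
    return optimized_attention

cpu, cuda = torch.device("cpu"), torch.device("cuda")
print(optimized_attention_for_device(cpu)(None, None, None, 1))                      # sub_quad
print(optimized_attention_for_device(cuda, mask=True)(None, None, None, 1))          # default (masked alias)
print(optimized_attention_for_device(cuda, small_input=True)(None, None, None, 1))   # pytorch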