chenpangpang / ComfyUI
"megatron/vscode:/vscode.git/clone" did not exist on "67aa86192e8febab1d7b4ca22045531081cf445c"
Commit f8706546, authored Feb 17, 2024 by comfyanonymous

Fix attention mask batch size in some attention functions.

Parent: 3b9969c1
Showing 1 changed file with 15 additions and 3 deletions: comfy/ldm/modules/attention.py (+15, -3)
comfy/ldm/modules/attention.py
```diff
@@ -114,7 +114,11 @@ def attention_basic(q, k, v, heads, mask=None):
             mask = repeat(mask, 'b j -> (b h) () j', h=h)
             sim.masked_fill_(~mask, max_neg_value)
         else:
-            mask = mask.reshape(mask.shape[0], -1, mask.shape[-2], mask.shape[-1]).expand(-1, heads, -1, -1).reshape(sim.shape)
+            if len(mask.shape) == 2:
+                bs = 1
+            else:
+                bs = mask.shape[0]
+            mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1]).expand(-1, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])
             sim.add_(mask)
 
     # attention, what we cannot get enough of
```
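As a reading aid (not part of the commit), here is a standalone sketch of what the new branch computes. The layout assumption, inferred from the `'b j -> (b h) () j'` pattern used by `repeat` in the boolean branch above, is that `sim` is shaped `(batch * heads, q_len, k_len)`. The old code read `mask.shape[0]` as the batch size, so a 2D `(q_len, k_len)` mask with no batch dimension made the `reshape` fail; the fix treats 2D masks as batch 1. The helper name below is hypothetical:

```python
import torch

def normalize_mask(mask, heads):
    # A 2D (q_len, k_len) mask has no batch dimension; the old code
    # would have treated q_len as the batch size here.
    if len(mask.shape) == 2:
        bs = 1
    else:
        bs = mask.shape[0]
    mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1])
    # Repeat per attention head, then fold batch and heads together to
    # line up with sim's assumed (batch * heads, q_len, k_len) layout.
    return mask.expand(-1, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])

heads, q_len, k_len = 8, 77, 77
print(normalize_mask(torch.zeros(q_len, k_len), heads).shape)     # torch.Size([8, 77, 77])
print(normalize_mask(torch.zeros(4, q_len, k_len), heads).shape)  # torch.Size([32, 77, 77])
```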
```diff
@@ -167,7 +171,11 @@ def attention_sub_quad(query, key, value, heads, mask=None):
         query_chunk_size = 512
 
     if mask is not None:
-        mask = mask.reshape(mask.shape[0], -1, mask.shape[-2], mask.shape[-1]).expand(-1, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])
+        if len(mask.shape) == 2:
+            bs = 1
+        else:
+            bs = mask.shape[0]
+        mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1]).expand(-1, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])
 
     hidden_states = efficient_dot_product_attention(
         query,
```
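The commit applies the same four lines verbatim in `attention_basic` (above), `attention_sub_quad`, and `attention_split` (below). A hypothetical follow-up refactor, not something this commit does, could hoist them into one helper so each call site reduces to `mask = expand_mask_for_heads(mask, heads)`:

```python
# Hypothetical shared helper; name and placement are illustrative only.
def expand_mask_for_heads(mask, heads):
    bs = 1 if mask.ndim == 2 else mask.shape[0]
    mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1])
    return mask.expand(-1, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])
```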
```diff
@@ -228,7 +236,11 @@ def attention_split(q, k, v, heads, mask=None):
                            f'Need: {mem_required/64/gb:0.1f}GB free, Have:{mem_free_total/gb:0.1f}GB free')
 
     if mask is not None:
-        mask = mask.reshape(mask.shape[0], -1, mask.shape[-2], mask.shape[-1]).expand(-1, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])
+        if len(mask.shape) == 2:
+            bs = 1
+        else:
+            bs = mask.shape[0]
+        mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1]).expand(-1, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])
 
     # print("steps", steps, mem_required, mem_free_total, modifier, q.element_size(), tensor_size)
     first_op_done = False
```
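As a quick sanity check under the same shape assumptions, the normalized mask lines up with logits of shape `(batch * heads, q_len, k_len)`, and the 2D case that previously raised a `RuntimeError` now goes through when the batch size is 1:

```python
import torch

heads, q_len, k_len = 8, 64, 64
sim = torch.randn(heads, q_len, k_len)  # logits for a batch of 1
mask = torch.randn(q_len, k_len)        # 2D additive mask, no batch dim

# Pre-fix behavior: mask.reshape(mask.shape[0], -1, q_len, k_len) asks
# for a 64 x n x 64 x 64 tensor out of only 64*64 elements and raises.
bs = 1 if mask.ndim == 2 else mask.shape[0]
mask = mask.reshape(bs, -1, q_len, k_len).expand(-1, heads, -1, -1).reshape(-1, q_len, k_len)
sim.add_(mask)  # (8, 64, 64) += (8, 64, 64)
```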