renzhc / diffusers_dcu / Commits

Commit ca0747a0 (unverified)
Authored Jul 26, 2024 by Aryan; committed via GitHub on Jul 26, 2024
Parent: 5c53ca5e

    remove unused code from pag attn procs (#8928)

Changes: 1 changed file with 0 additions and 12 deletions

src/diffusers/models/attention_processor.py  (+0 −12)
@@ -2962,12 +2962,6 @@ class PAGIdentitySelfAttnProcessor2_0:
         # perturbed path (identity attention)
         batch_size, sequence_length, _ = hidden_states_ptb.shape
 
-        if attention_mask is not None:
-            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
-            # scaled_dot_product_attention expects attention_mask shape to be
-            # (batch, heads, source_length, target_length)
-            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
-
         if attn.group_norm is not None:
             hidden_states_ptb = attn.group_norm(hidden_states_ptb.transpose(1, 2)).transpose(1, 2)
@@ -3070,12 +3064,6 @@ class PAGCFGIdentitySelfAttnProcessor2_0:
         # perturbed path (identity attention)
         batch_size, sequence_length, _ = hidden_states_ptb.shape
 
-        if attention_mask is not None:
-            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
-            # scaled_dot_product_attention expects attention_mask shape to be
-            # (batch, heads, source_length, target_length)
-            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
-
         if attn.group_norm is not None:
             hidden_states_ptb = attn.group_norm(hidden_states_ptb.transpose(1, 2)).transpose(1, 2)
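For context, the deleted block was dead code: in the perturbed branch both PAG processors replace self-attention with an identity mapping, so the attention_mask prepared there was never passed to scaled_dot_product_attention. Below is a minimal sketch, assuming a stand-in attn object with group_norm, to_v, and to_out attributes like diffusers' Attention module; it paraphrases the logic that remains after this commit and is not the verbatim implementation.

import torch

def perturbed_identity_path(attn, hidden_states_ptb: torch.Tensor) -> torch.Tensor:
    # Sketch of the perturbed ("identity attention") branch kept by this commit;
    # `attn` stands in for diffusers' Attention module (hypothetical here).
    if attn.group_norm is not None:
        hidden_states_ptb = attn.group_norm(hidden_states_ptb.transpose(1, 2)).transpose(1, 2)

    # Identity attention: only the value projection is applied. There is no
    # scaled_dot_product_attention call in this branch, which is why preparing
    # an attention_mask here had no effect.
    hidden_states_ptb = attn.to_v(hidden_states_ptb)

    # Output projection and dropout, as in the regular attention output path.
    hidden_states_ptb = attn.to_out[0](hidden_states_ptb)
    hidden_states_ptb = attn.to_out[1](hidden_states_ptb)
    return hidden_states_ptb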