chenpangpang / ComfyUI · Commits

Commit 88733c99, authored Oct 11, 2023 by comfyanonymous

pytorch_attention_enabled can now return True when xformers is enabled.

parent 20d3852a
Changes: 2 changed files with 7 additions and 4 deletions

comfy/ldm/modules/diffusionmodules/model.py  +1 -1
comfy/model_management.py  +6 -3
comfy/ldm/modules/diffusionmodules/model.py

@@ -355,7 +355,7 @@ def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
     assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown'
     if model_management.xformers_enabled_vae() and attn_type == "vanilla":
         attn_type = "vanilla-xformers"
-    if model_management.pytorch_attention_enabled() and attn_type == "vanilla":
+    elif model_management.pytorch_attention_enabled() and attn_type == "vanilla":
         attn_type = "vanilla-pytorch"
     print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
     if attn_type == "vanilla":
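Since pytorch_attention_enabled() can now report True while xformers is also enabled, the switch from if to elif makes the precedence in make_attn explicit: the xformers VAE attention is chosen when available, and the PyTorch scaled-dot-product path is used only otherwise. Below is a minimal, illustrative sketch of that selection order; pick_attn_type and its boolean parameters are stand-ins for the real model_management checks, not part of the codebase.

# Illustrative sketch only: the real checks live in comfy/model_management.py.
def pick_attn_type(attn_type, xformers_vae=True, pytorch_sdp=True):
    if xformers_vae and attn_type == "vanilla":
        attn_type = "vanilla-xformers"
    elif pytorch_sdp and attn_type == "vanilla":
        attn_type = "vanilla-pytorch"
    return attn_type

# Both backends report enabled: xformers takes precedence for the VAE.
assert pick_attn_type("vanilla", xformers_vae=True, pytorch_sdp=True) == "vanilla-xformers"
# Only PyTorch attention enabled: falls through to the SDP implementation.
assert pick_attn_type("vanilla", xformers_vae=False, pytorch_sdp=True) == "vanilla-pytorch"
# Neither enabled: plain vanilla attention is kept.
assert pick_attn_type("vanilla", xformers_vae=False, pytorch_sdp=False) == "vanilla"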
comfy/model_management.py

@@ -154,14 +154,18 @@ def is_nvidia():
         return True
     return False
 
-ENABLE_PYTORCH_ATTENTION = args.use_pytorch_cross_attention
+ENABLE_PYTORCH_ATTENTION = False
+if args.use_pytorch_cross_attention:
+    ENABLE_PYTORCH_ATTENTION = True
+    XFORMERS_IS_AVAILABLE = False
+
 VAE_DTYPE = torch.float32
 
 try:
     if is_nvidia():
         torch_version = torch.version.__version__
         if int(torch_version[0]) >= 2:
-            if ENABLE_PYTORCH_ATTENTION == False and XFORMERS_IS_AVAILABLE == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
+            if ENABLE_PYTORCH_ATTENTION == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
                 ENABLE_PYTORCH_ATTENTION = True
         if torch.cuda.is_bf16_supported():
             VAE_DTYPE = torch.bfloat16

@@ -186,7 +190,6 @@ if ENABLE_PYTORCH_ATTENTION:
     torch.backends.cuda.enable_math_sdp(True)
     torch.backends.cuda.enable_flash_sdp(True)
     torch.backends.cuda.enable_mem_efficient_sdp(True)
-    XFORMERS_IS_AVAILABLE = False
 
 if args.lowvram:
     set_vram_to = VRAMState.LOW_VRAM
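Taken together, the model_management.py hunks stop forcing xformers off whenever PyTorch attention is auto-enabled: on an NVIDIA GPU with torch >= 2, ENABLE_PYTORCH_ATTENTION can now be True while XFORMERS_IS_AVAILABLE stays True, and only the explicit --use-pytorch-cross-attention flag still disables xformers. The sketch below condenses that module-level logic into a standalone function for illustration; resolve_flags and its parameters are hypothetical, only the flag names mirror the diff, and other settings (VRAM state, dtypes) are ignored.

# Rough sketch of the flag resolution after this commit (illustrative only).
def resolve_flags(use_pytorch_cross_attention=False,
                  use_split_cross_attention=False,
                  use_quad_cross_attention=False,
                  xformers_is_available=True,
                  nvidia_with_torch2=True):
    enable_pytorch_attention = False
    if use_pytorch_cross_attention:
        enable_pytorch_attention = True
        xformers_is_available = False  # the explicit flag still turns xformers off

    if nvidia_with_torch2:
        # xformers availability is no longer part of this condition, so
        # pytorch_attention_enabled() can return True while xformers stays on.
        if (not enable_pytorch_attention
                and not use_split_cross_attention
                and not use_quad_cross_attention):
            enable_pytorch_attention = True

    return enable_pytorch_attention, xformers_is_available

# Default case on a torch >= 2 NVIDIA setup with xformers installed:
# both end up enabled, which is what the commit title describes.
assert resolve_flags() == (True, True)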