chenpangpang / ComfyUI · Commits

Commit b8c7c770
authored Aug 27, 2023 by comfyanonymous
Enable bf16-vae by default on ampere and up.
parent 1c794a21

Showing 2 changed files with 24 additions and 14 deletions (+24 -14):

comfy/cli_args.py          +2  -1
comfy/model_management.py  +22 -13
comfy/cli_args.py
@@ -54,7 +54,8 @@ fp_group.add_argument("--force-fp16", action="store_true", help="Force fp16.")
 fpvae_group = parser.add_mutually_exclusive_group()
 fpvae_group.add_argument("--fp16-vae", action="store_true", help="Run the VAE in fp16, might cause black images.")
-fpvae_group.add_argument("--bf16-vae", action="store_true", help="Run the VAE in bf16, might lower quality.")
+fpvae_group.add_argument("--fp32-vae", action="store_true", help="Run the VAE in full precision fp32.")
+fpvae_group.add_argument("--bf16-vae", action="store_true", help="Run the VAE in bf16.")
 
 parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.")
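For context, a minimal standalone sketch (not part of this commit) of how the mutually exclusive argparse group above behaves: at most one VAE precision flag can be passed, so the new --fp32-vae option cannot conflict with --fp16-vae or --bf16-vae.

import argparse

# Hypothetical standalone parser mirroring fpvae_group from the diff above.
parser = argparse.ArgumentParser()
fpvae_group = parser.add_mutually_exclusive_group()
fpvae_group.add_argument("--fp16-vae", action="store_true", help="Run the VAE in fp16, might cause black images.")
fpvae_group.add_argument("--fp32-vae", action="store_true", help="Run the VAE in full precision fp32.")
fpvae_group.add_argument("--bf16-vae", action="store_true", help="Run the VAE in bf16.")

args = parser.parse_args(["--bf16-vae"])
print(args.bf16_vae)  # True; argparse maps --bf16-vae to the attribute bf16_vae

# Passing two flags from the same group exits with an error like:
#   "argument --fp32-vae: not allowed with argument --bf16-vae"
# parser.parse_args(["--bf16-vae", "--fp32-vae"])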
comfy/model_management.py
@@ -148,16 +148,28 @@ def is_nvidia():
         return True
 
 ENABLE_PYTORCH_ATTENTION = args.use_pytorch_cross_attention
+VAE_DTYPE = torch.float32
 
-if ENABLE_PYTORCH_ATTENTION == False and XFORMERS_IS_AVAILABLE == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
-    try:
-        if is_nvidia():
-            torch_version = torch.version.__version__
-            if int(torch_version[0]) >= 2:
+try:
+    if is_nvidia():
+        torch_version = torch.version.__version__
+        if int(torch_version[0]) >= 2:
+            if ENABLE_PYTORCH_ATTENTION == False and XFORMERS_IS_AVAILABLE == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
                 ENABLE_PYTORCH_ATTENTION = True
-    except:
-        pass
+            if torch.cuda.is_bf16_supported():
+                VAE_DTYPE = torch.bfloat16
+except:
+    pass
+
+if args.fp16_vae:
+    VAE_DTYPE = torch.float16
+elif args.bf16_vae:
+    VAE_DTYPE = torch.bfloat16
+elif args.fp32_vae:
+    VAE_DTYPE = torch.float32
 
 if ENABLE_PYTORCH_ATTENTION:
     torch.backends.cuda.enable_math_sdp(True)
     torch.backends.cuda.enable_flash_sdp(True)
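To see what the new default does in isolation, here is a hedged sketch paraphrasing the hunk above (not the commit's exact code): on NVIDIA GPUs where torch.cuda.is_bf16_supported() returns True (Ampere, SM 8.0, and newer), the VAE dtype defaults to bfloat16, and the explicit CLI flags still take precedence.

import torch

def pick_vae_dtype(fp16_vae=False, bf16_vae=False, fp32_vae=False):
    vae_dtype = torch.float32
    try:
        # torch.cuda.is_bf16_supported() reports True on Ampere and up.
        if torch.cuda.is_available() and torch.cuda.is_bf16_supported():
            vae_dtype = torch.bfloat16
    except Exception:
        pass  # mirror the commit's bare except: keep the float32 fallback
    # Explicit flags override the autodetected default, as in the diff above.
    if fp16_vae:
        vae_dtype = torch.float16
    elif bf16_vae:
        vae_dtype = torch.bfloat16
    elif fp32_vae:
        vae_dtype = torch.float32
    return vae_dtype

print(pick_vae_dtype())  # torch.bfloat16 on Ampere+, torch.float32 otherwise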
@@ -228,6 +240,7 @@ try:
 except:
     print("Could not pick default device.")
 
+print("VAE dtype:", VAE_DTYPE)
 
 current_loaded_models = []
@@ -448,12 +461,8 @@ def vae_offload_device():
     return torch.device("cpu")
 
 def vae_dtype():
-    if args.fp16_vae:
-        return torch.float16
-    elif args.bf16_vae:
-        return torch.bfloat16
-    else:
-        return torch.float32
+    global VAE_DTYPE
+    return VAE_DTYPE
 
 def get_autocast_device(dev):
     if hasattr(dev, 'type'):
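A hypothetical caller-side sketch (the names below are assumptions, not from this diff): since vae_dtype() now returns the module-level VAE_DTYPE instead of re-reading the CLI flags, loading code can ask model_management for the effective dtype once and cast the VAE with it. Note the global VAE_DTYPE statement is only required if the function ever reassigns the name; for a pure read it is redundant but harmless.

import torch
import comfy.model_management as model_management  # assumes ComfyUI is on the import path

dtype = model_management.vae_dtype()  # torch.bfloat16 on Ampere+ unless a flag overrode it
vae_stub = torch.nn.Linear(4, 4)      # stand-in for a real VAE module
vae_stub.to(dtype=dtype)
print("VAE stub cast to", dtype)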