chenpangpang / ComfyUI · Commits

Commit 0ec513d8, authored Jun 15, 2024 by comfyanonymous
Add a --force-channels-last to inference models in channel last mode.
parent 0e06b370
Showing 3 changed files with 10 additions and 0 deletions:

comfy/cli_args.py          +1  -0
comfy/model_base.py        +3  -0
comfy/model_management.py  +6  -0
comfy/cli_args.py

@@ -75,6 +75,7 @@ fpte_group.add_argument("--fp8_e5m2-text-enc", action="store_true", help="Store
 fpte_group.add_argument("--fp16-text-enc", action="store_true", help="Store text encoder weights in fp16.")
 fpte_group.add_argument("--fp32-text-enc", action="store_true", help="Store text encoder weights in fp32.")
+parser.add_argument("--force-channels-last", action="store_true", help="Force channels last format when inferencing the models.")
 parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.")
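Note: the new option uses the same argparse store_true pattern as the neighbouring text-encoder flags, so it defaults to False and only becomes True when passed explicitly. A minimal standalone sketch of that behaviour (illustration only, not ComfyUI code):

import argparse

# Minimal sketch: a store_true flag is False by default and True only when present.
parser = argparse.ArgumentParser()
parser.add_argument("--force-channels-last", action="store_true",
                    help="Force channels last format when inferencing the models.")

assert parser.parse_args([]).force_channels_last is False
assert parser.parse_args(["--force-channels-last"]).force_channels_last is True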
comfy/model_base.py

@@ -66,6 +66,9 @@ class BaseModel(torch.nn.Module):
             else:
                 operations = comfy.ops.disable_weight_init
             self.diffusion_model = unet_model(**unet_config, device=device, operations=operations)
+            if comfy.model_management.force_channels_last():
+                self.diffusion_model.to(memory_format=torch.channels_last)
+                logging.debug("using channels last mode for diffusion model")
         self.model_type = model_type
         self.model_sampling = model_sampling(model_config, model_type)
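For context, .to(memory_format=torch.channels_last) keeps a 4D tensor's logical NCHW shape and only reorders the underlying strides to NHWC, which can let backends such as cuDNN pick faster convolution kernels. A hedged sketch on a toy convolution (not ComfyUI's UNet) showing what the conversion in this hunk does:

import torch

# Toy example only: convert a module's 4D weights and an input tensor to channels-last.
conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)
conv = conv.to(memory_format=torch.channels_last)   # same call as on diffusion_model above

x = torch.randn(1, 3, 64, 64).to(memory_format=torch.channels_last)
y = conv(x)

print(y.shape)                                              # logical shape unchanged: (1, 8, 64, 64)
print(y.is_contiguous(memory_format=torch.channels_last))   # True: the layout propagates to the output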
comfy/model_management.py

@@ -673,6 +673,12 @@ def device_should_use_non_blocking(device):
     return False
     # return True #TODO: figure out why this causes memory issues on Nvidia and possibly others
 
+def force_channels_last():
+    if args.force_channels_last:
+        return True
+
+    #TODO
+    return False
 
 def cast_to_device(tensor, device, dtype, copy=False):
     device_supports_cast = False
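The flag is exposed through a small helper so that model code queries comfy.model_management rather than reading the CLI args directly; the bare #TODO suggests the decision may later depend on more than the flag alone. A hedged sketch of the caller-side pattern, where prepare_latent is a hypothetical helper and not part of ComfyUI:

import torch
import comfy.model_management

def prepare_latent(latent: torch.Tensor) -> torch.Tensor:
    # Hypothetical helper (not in ComfyUI): convert inputs to the same memory format
    # the diffusion model was switched to in model_base.py, so weights and inputs agree.
    if comfy.model_management.force_channels_last():
        return latent.to(memory_format=torch.channels_last)
    return latent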