chenpangpang / ComfyUI

Commit 5f573626 (parent 490771b7)
Authored Jul 16, 2023 by comfyanonymous

Lower lora ram usage when in normal vram mode.
Showing 2 changed files with 19 additions and 13 deletions:

comfy/model_management.py  +10 -10
comfy/sd.py                +9  -3
comfy/model_management.py

```diff
@@ -233,10 +233,9 @@ def unload_model():
             accelerate.hooks.remove_hook_from_submodules(current_loaded_model.model)
             model_accelerated = False
 
-        current_loaded_model.unpatch_model()
-
         current_loaded_model.model.to(current_loaded_model.offload_device)
         current_loaded_model.model_patches_to(current_loaded_model.offload_device)
+        current_loaded_model.unpatch_model()
         current_loaded_model = None
         if vram_state != VRAMState.HIGH_VRAM:
             soft_empty_cache()
```
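The hunk above flips the order of unpatching and offloading: the model now moves to the offload device before unpatch_model() restores the backed-up weights, so the restore presumably runs against CPU-resident tensors instead of the GPU copy. A minimal sketch of that ordering; ToyPatcher is a hypothetical stand-in for ComfyUI's patcher, not code from this commit:

```python
import torch

class ToyPatcher:
    """Hypothetical stand-in for the patched-model holder."""
    def __init__(self, model):
        self.model = model
        self.offload_device = torch.device("cpu")
        # Backup of the pre-lora weight, restored by unpatch_model().
        self.backup = {"weight": model.weight.detach().clone()}

    def unpatch_model(self):
        # Rebind the backup tensor rather than copying it into the
        # live weight (mirrors the set_attr change in comfy/sd.py).
        self.model.weight = torch.nn.Parameter(self.backup["weight"])
        self.backup = {}

loaded = ToyPatcher(torch.nn.Linear(8, 8))
loaded.model.to(loaded.offload_device)  # offload first...
loaded.unpatch_model()                  # ...then restore on the offload device
```

The remaining two hunks in comfy/model_management.py relocate the accelerate dispatch block from before model.patch_model() to after it: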
```diff
@@ -282,14 +281,6 @@ def load_model_gpu(model):
     elif vram_set_state == VRAMState.NORMAL_VRAM or vram_set_state == VRAMState.HIGH_VRAM or vram_set_state == VRAMState.SHARED:
         model_accelerated = False
         real_model.to(torch_dev)
-    else:
-        if vram_set_state == VRAMState.NO_VRAM:
-            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"})
-        elif vram_set_state == VRAMState.LOW_VRAM:
-            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(lowvram_model_memory // (1024 * 1024)), "cpu": "16GiB"})
-
-        accelerate.dispatch_model(real_model, device_map=device_map, main_device=torch_dev)
-        model_accelerated = True
 
     try:
         real_model = model.patch_model()
@@ -298,6 +289,15 @@ def load_model_gpu(model):
         unload_model()
         raise e
 
+    if vram_set_state == VRAMState.NO_VRAM:
+        device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"})
+        accelerate.dispatch_model(real_model, device_map=device_map, main_device=torch_dev)
+        model_accelerated = True
+    elif vram_set_state == VRAMState.LOW_VRAM:
+        device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(lowvram_model_memory // (1024 * 1024)), "cpu": "16GiB"})
+        accelerate.dispatch_model(real_model, device_map=device_map, main_device=torch_dev)
+        model_accelerated = True
+
     return current_loaded_model
 
 def load_controlnet_gpu(control_models):
```
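infer_auto_device_map() and dispatch_model() in the relocated block are accelerate's standard big-model utilities. A minimal, self-contained sketch of the NO_VRAM branch, assuming a CUDA device 0; the two-layer Sequential model is illustrative, while the max_memory caps are taken verbatim from the diff:

```python
import torch
import accelerate

# Toy model standing in for real_model in load_model_gpu().
real_model = torch.nn.Sequential(
    torch.nn.Linear(1024, 1024),
    torch.nn.Linear(1024, 1024),
)
torch_dev = torch.device("cuda", 0)

# Plan a split between GPU 0 (capped at 256MiB, as in the NO_VRAM
# branch) and CPU RAM; submodules that don't fit are assigned to "cpu".
device_map = accelerate.infer_auto_device_map(
    real_model, max_memory={0: "256MiB", "cpu": "16GiB"})

# Install hooks that shuttle each submodule to its assigned device at
# forward time, with torch_dev as the main execution device.
accelerate.dispatch_model(real_model, device_map=device_map, main_device=torch_dev)
```

After this move, the hooks are installed on real_model only once patch_model() has succeeded, so the device map is computed for the lora-patched weights.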
comfy/sd.py

```diff
@@ -428,11 +428,17 @@ class ModelPatcher:
         return weight
 
     def unpatch_model(self):
-        model_sd = self.model_state_dict()
         keys = list(self.backup.keys())
 
+        def set_attr(obj, attr, value):
+            attrs = attr.split(".")
+            for name in attrs[:-1]:
+                obj = getattr(obj, name)
+            prev = getattr(obj, attrs[-1])
+            setattr(obj, attrs[-1], torch.nn.Parameter(value))
+            del prev
+
         for k in keys:
-            model_sd[k][:] = self.backup[k]
-            del self.backup[k]
+            set_attr(self.model, k, self.backup[k])
 
         self.backup = {}
```
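The new nested set_attr() walks a dotted state-dict key (e.g. "0.weight") down to the owning submodule and rebinds that parameter to the backup tensor, where the old code materialized model_state_dict() and copied each backup into the existing tensor in place. A minimal sketch on a hypothetical toy module (not from the commit):

```python
import torch

model = torch.nn.Sequential(torch.nn.Linear(4, 4))
backup = {"0.weight": model.state_dict()["0.weight"].clone()}

def set_attr(obj, attr, value):
    # Same helper as in the diff: walk "0.weight" down to the Linear,
    # then rebind its weight to the backup tensor.
    attrs = attr.split(".")
    for name in attrs[:-1]:
        obj = getattr(obj, name)
    prev = getattr(obj, attrs[-1])
    setattr(obj, attrs[-1], torch.nn.Parameter(value))
    del prev

# Old path: an in-place copy keeps the patched tensor alive and writes
# into it. New path: rebinding lets the patched tensor be freed.
for k in list(backup.keys()):
    set_attr(model, k, backup[k])
```

Because no full state dict is built and no per-tensor copy is made, restoring the original weights needs no extra allocations, which appears to be the "lower lora ram usage" named in the commit message.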