ComfyUI commit 2326ff12, authored Feb 17, 2023 by comfyanonymous

Add: --highvram for when you want models to stay on the vram.
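For context, the new switch is read from sys.argv at import time, so it is passed when launching ComfyUI; a minimal usage sketch, assuming the standard main.py entry point:

python main.py --highvram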
parent 09f1d76e
Showing 1 changed file with 17 additions and 9 deletions.
comfy/model_management.py (+17, -9)
@@ -3,6 +3,7 @@ CPU = 0
 NO_VRAM = 1
 LOW_VRAM = 2
 NORMAL_VRAM = 3
+HIGH_VRAM = 4

 accelerate_enabled = False
 vram_state = NORMAL_VRAM
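These module-level integers act as an ad-hoc ordered enum, and HIGH_VRAM extends it; the value 4 also matters as an index into the label list updated further down. A purely illustrative equivalent using Python's enum module (the project itself keeps plain ints):

import enum

class VRAMState(enum.IntEnum):
    CPU = 0
    NO_VRAM = 1
    LOW_VRAM = 2
    NORMAL_VRAM = 3
    HIGH_VRAM = 4  # new in this commit: keep models resident on the GPU

print(VRAMState(4).name)  # HIGH_VRAM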
@@ -27,10 +28,11 @@ if "--lowvram" in sys.argv:
     set_vram_to = LOW_VRAM
 if "--novram" in sys.argv:
     set_vram_to = NO_VRAM
+if "--highvram" in sys.argv:
+    vram_state = HIGH_VRAM

-if set_vram_to == LOW_VRAM or set_vram_to == NO_VRAM:
+if set_vram_to != NORMAL_VRAM:
     try:
         import accelerate
         accelerate_enabled = True
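Two details worth noting here: --highvram assigns vram_state directly rather than going through set_vram_to, so it never triggers the accelerate import (which is only needed for offloading), and the rewritten condition is equivalent to the old LOW_VRAM-or-NO_VRAM check, since those are the only values set_vram_to can take besides NORMAL_VRAM. A condensed, self-contained sketch of the resulting parse order, with the constants reproduced from the hunk above:

import sys

NO_VRAM, LOW_VRAM, NORMAL_VRAM, HIGH_VRAM = 1, 2, 3, 4

set_vram_to = NORMAL_VRAM   # candidate low-memory state; may need accelerate
vram_state = NORMAL_VRAM    # effective state
if "--lowvram" in sys.argv:
    set_vram_to = LOW_VRAM
if "--novram" in sys.argv:
    set_vram_to = NO_VRAM
if "--highvram" in sys.argv:
    vram_state = HIGH_VRAM  # applied directly; no accelerate required
if set_vram_to != NORMAL_VRAM:
    print("accelerate would be imported here for offloading")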
@@ -44,7 +46,7 @@ if set_vram_to != NORMAL_VRAM:
     total_vram_available_mb = int(max(256, total_vram_available_mb))

-print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM"][vram_state])
+print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM"][vram_state])

 current_loaded_model = None
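This label change is load-bearing, not cosmetic: the list is indexed by vram_state, so with HIGH_VRAM == 4 the old four-element list would raise an IndexError at startup. A quick check:

labels = ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM"]
print("Set vram state to:", labels[4])  # HIGH_VRAM == 4 -> "HIGH VRAM"
# labels[:4][4] would raise IndexError, as the pre-commit list did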
@@ -57,18 +59,24 @@ def unload_model():
     global current_loaded_model
     global model_accelerated
     global current_gpu_controlnets
+    global vram_state
     if current_loaded_model is not None:
         if model_accelerated:
             accelerate.hooks.remove_hook_from_submodules(current_loaded_model.model)
             model_accelerated = False

-        current_loaded_model.model.cpu()
+        #never unload models from GPU on high vram
+        if vram_state != HIGH_VRAM:
+            current_loaded_model.model.cpu()
         current_loaded_model.unpatch_model()
         current_loaded_model = None

-    if len(current_gpu_controlnets) > 0:
-        for n in current_gpu_controlnets:
-            n.cpu()
-        current_gpu_controlnets = []
+    if vram_state != HIGH_VRAM:
+        if len(current_gpu_controlnets) > 0:
+            for n in current_gpu_controlnets:
+                n.cpu()
+            current_gpu_controlnets = []

 def load_model_gpu(model):
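This hunk is the core of the feature: under HIGH_VRAM, unload_model() still reverts the model patches but skips the GPU-to-CPU transfer for both the main model and any controlnets, so the weights stay resident in VRAM between prompts. A minimal sketch of the guard idiom with a stand-in torch module (maybe_offload is a hypothetical name, not a ComfyUI function):

import torch

HIGH_VRAM = 4

def maybe_offload(module: torch.nn.Module, vram_state: int) -> None:
    # Mirrors the guard added above: every state except HIGH_VRAM
    # evicts the weights to system RAM; HIGH_VRAM leaves them on the GPU.
    if vram_state != HIGH_VRAM:
        module.cpu()

maybe_offload(torch.nn.Linear(4, 4), vram_state=HIGH_VRAM)  # no-op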
@@ -87,7 +95,7 @@ def load_model_gpu(model):
     current_loaded_model = model
     if vram_state == CPU:
         pass
-    elif vram_state == NORMAL_VRAM:
+    elif vram_state == NORMAL_VRAM or vram_state == HIGH_VRAM:
         model_accelerated = False
         real_model.cuda()
     else:
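On the load side, HIGH_VRAM simply shares the NORMAL_VRAM branch: the whole model is moved to the GPU with .cuda() and no accelerate hooks are attached, so the two states differ only at unload time. A hypothetical condensed view of the per-state behaviour after this commit (illustrative, not the actual ComfyUI code):

import torch

CPU, NO_VRAM, LOW_VRAM, NORMAL_VRAM, HIGH_VRAM = 0, 1, 2, 3, 4

def load(real_model: torch.nn.Module, vram_state: int) -> None:
    if vram_state == CPU:
        pass  # model runs on the CPU; nothing to move
    elif vram_state in (NORMAL_VRAM, HIGH_VRAM):
        real_model.cuda()  # whole model onto the GPU, no accelerate hooks
    else:
        pass  # NO_VRAM / LOW_VRAM: accelerate dispatches layers on demand

def unload(real_model: torch.nn.Module, vram_state: int) -> None:
    if vram_state != HIGH_VRAM:
        real_model.cpu()  # only HIGH_VRAM keeps the weights resident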