chenpangpang / ComfyUI · Commits

Commit c92633ea, authored Feb 08, 2023 by comfyanonymous
Parent: 534736b9

Auto calculate amount of memory to use for --lowvram

Showing 1 changed file with 10 additions and 1 deletion (+10 −1).

comfy/model_management.py
@@ -7,6 +7,8 @@ NORMAL_VRAM = 3
 accelerate_enabled = False
 vram_state = NORMAL_VRAM
 
+total_vram_available_mb = -1
+
 import sys
 
 set_vram_to = NORMAL_VRAM
@@ -24,6 +26,13 @@ if set_vram_to != NORMAL_VRAM:
         import traceback
         print(traceback.format_exc())
         print("ERROR: COULD NOT ENABLE LOW VRAM MODE.")
+    try:
+        import torch
+        total_vram_available_mb = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
+    except:
+        pass
+    total_vram_available_mb = (total_vram_available_mb - 1024) // 2
+    total_vram_available_mb = int(max(256, total_vram_available_mb))
 
 print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM"][vram_state])
 
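The block added in this hunk boils down to a small self-contained calculation: read the device's total VRAM, reserve roughly 1 GiB for the CUDA context and other consumers, keep half of the remainder, and never drop below 256 MiB. A minimal sketch of the same logic follows; it requires a CUDA device, and the helper name vram_budget_mb is ours, not the commit's:

import torch

def vram_budget_mb(reserve_mb=1024, floor_mb=256):
    # torch.cuda.mem_get_info returns (free_bytes, total_bytes); like the
    # commit, this reads the total figure at index [1].
    total_bytes = torch.cuda.mem_get_info(torch.cuda.current_device())[1]
    total_mb = total_bytes / (1024 * 1024)
    # Reserve ~1 GiB, use half of what remains, clamp to at least 256 MiB.
    return int(max(floor_mb, (total_mb - reserve_mb) // 2))

On an 8 GiB card this yields (8192 − 1024) // 2 = 3584 MiB, so --lowvram now scales with the hardware instead of the fixed 1 GiB cap removed in the next hunk.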
@@ -71,7 +80,7 @@ def load_model_gpu(model):
         if vram_state == NO_VRAM:
             device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"})
         elif vram_state == LOW_VRAM:
-            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "1GiB", "cpu": "16GiB"})
+            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"})
         accelerate.dispatch_model(real_model, device_map=device_map, main_device="cuda")
         model_accelerated = True
     return current_loaded_model
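For context on the consuming side of the change: accelerate's infer_auto_device_map splits a model's modules across devices subject to per-device memory caps, and dispatch_model then places the modules and installs hooks that move data between devices at run time. A hedged, standalone sketch of that pattern; the toy model and the hard-coded budget are illustrative, while the "16GiB" CPU cap and the call shapes match the commit:

import accelerate
import torch.nn as nn

# A tiny stand-in model; any nn.Module works.
model = nn.Sequential(nn.Linear(1024, 4096), nn.ReLU(), nn.Linear(4096, 1024))

gpu_budget_mb = 3584  # e.g. the value computed above for an 8 GiB card
device_map = accelerate.infer_auto_device_map(
    model,
    max_memory={0: "{}MiB".format(gpu_budget_mb), "cpu": "16GiB"},
)
# Modules that fit the GPU budget are assigned to device 0; with a real
# multi-GiB model the overflow lands on "cpu" and is shuttled in as needed
# during the forward pass.
accelerate.dispatch_model(model, device_map=device_map, main_device="cuda")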