Commit 3aee33b5 authored by comfyanonymous

Add --disable-smart-memory for those that want the old behaviour.

parent 2be27427
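
Context for the change below: with smart memory management, ComfyUI keeps models resident in VRAM when it can rather than offloading them after use. The new `--disable-smart-memory` flag forces aggressive offload back to regular RAM, restoring the previous behaviour; it would be passed at startup, e.g. `python main.py --disable-smart-memory` (assuming the standard `main.py` entry point).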
@@ -82,6 +82,9 @@ vram_group.add_argument("--novram", action="store_true", help="When lowvram isn't enough.")
vram_group.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).")
parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to agressively offload to regular ram instead of keeping models in vram when it can.")
parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.")
parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.")
parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build: Enable convenient things that most people using the standalone windows build will probably enjoy (like auto opening the page on startup).")
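For readers unfamiliar with argparse's `store_true` action, here is a minimal sketch of how the new flag parses in isolation. This is a hypothetical standalone parser for illustration only, not ComfyUI's actual parser setup:

```python
import argparse

# Standalone parser containing just the argument added in this diff.
parser = argparse.ArgumentParser()
parser.add_argument("--disable-smart-memory", action="store_true",
                    help="Force ComfyUI to aggressively offload to regular ram "
                         "instead of keeping models in vram when it can.")

args = parser.parse_args([])                          # flag absent -> False
assert args.disable_smart_memory is False
args = parser.parse_args(["--disable-smart-memory"])  # flag given -> True
assert args.disable_smart_memory is True
```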
@@ -202,6 +202,10 @@ if cpu_state == CPUState.MPS:
print(f"Set vram state to: {vram_state.name}")
DISABLE_SMART_MEMORY = args.disable_smart_memory

if DISABLE_SMART_MEMORY:
    print("Disabling smart memory management")

def get_torch_device_name(device):
    if hasattr(device, 'type'):
@@ -289,6 +293,9 @@ def unload_model_clones(model):
def free_memory(memory_required, device, keep_loaded=[]):
    unloaded_model = False
    for i in range(len(current_loaded_models) -1, -1, -1):
        if DISABLE_SMART_MEMORY:
            current_free_mem = 0
        else:
            current_free_mem = get_free_memory(device)
        if current_free_mem > memory_required:
            break
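As a rough illustration of why zeroing `current_free_mem` restores the old behaviour: the early `break` that skips unloading when enough VRAM is already free can never fire, so the loop walks the entire model list and offloads everything. A hedged, self-contained sketch; every name other than `DISABLE_SMART_MEMORY` is made up for the example:

```python
DISABLE_SMART_MEMORY = True

def free_memory_sketch(memory_required, get_free_memory, loaded_models):
    # Mirrors the loop structure in the diff: walk models newest-first,
    # stopping early only if enough memory is already free.
    for i in range(len(loaded_models) - 1, -1, -1):
        current_free_mem = 0 if DISABLE_SMART_MEMORY else get_free_memory()
        if current_free_mem > memory_required:
            break  # smart path: enough VRAM free, keep remaining models loaded
        loaded_models.pop(i)  # stand-in for actually unloading the model

models = ["model_a", "model_b", "model_c"]
free_memory_sketch(1 << 30, lambda: 8 << 30, models)
print(models)  # [] -- with the flag set, every model is offloaded
```

With `DISABLE_SMART_MEMORY = False`, the fake 8 GiB of free memory exceeds the 1 GiB requirement, the loop breaks immediately, and all three models stay loaded.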