"vscode:/vscode.git/clone" did not exist on "ffe7b93b60e037b2b1ec056b88cba7c14fc3f9e3"
Commit dc9d1f31 authored by comfyanonymous

Improvements for OSX.

parent 103c487a
@@ -334,19 +334,19 @@ def unload_if_low_vram(model):
     return model
 
 def unet_offload_device():
-    if vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.SHARED:
+    if vram_state == VRAMState.HIGH_VRAM:
         return get_torch_device()
     else:
         return torch.device("cpu")
 
 def text_encoder_offload_device():
-    if args.gpu_only or vram_state == VRAMState.SHARED:
+    if args.gpu_only:
         return get_torch_device()
     else:
         return torch.device("cpu")
 
 def text_encoder_device():
-    if args.gpu_only or vram_state == VRAMState.SHARED:
+    if args.gpu_only:
         return get_torch_device()
     elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM:
         if torch.get_num_threads() < 8: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough.
@@ -360,7 +360,7 @@ def vae_device():
     return get_torch_device()
 
 def vae_offload_device():
-    if args.gpu_only or vram_state == VRAMState.SHARED:
+    if args.gpu_only:
         return get_torch_device()
     else:
         return torch.device("cpu")
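For context, the change drops the VRAMState.SHARED special case from the offload-device helpers, so on OSX (where the VRAM state is SHARED) the UNet, text encoder and VAE are now offloaded to the CPU device instead of staying on the GPU device. The sketch below is a minimal standalone illustration of that post-commit behavior, not the actual ComfyUI module: the VRAMState enum is reduced to the states seen in this hunk, get_torch_device() is a simplified stand-in, and vram_state is passed as an argument rather than read from a module-level global as in the real code.

# Standalone sketch of the post-commit offload behavior (assumptions noted above).
from enum import Enum
import torch

class VRAMState(Enum):
    # Reduced copy of the states that appear in this diff.
    SHARED = 0
    NORMAL_VRAM = 1
    HIGH_VRAM = 2

def get_torch_device():
    # Simplified stand-in for the real get_torch_device() helper.
    if getattr(torch.backends, "mps", None) is not None and torch.backends.mps.is_available():
        return torch.device("mps")
    if torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")

def unet_offload_device(vram_state):
    # After this commit only HIGH_VRAM keeps the UNet on the GPU device;
    # SHARED (OSX/MPS) now falls through to the CPU device like NORMAL_VRAM.
    if vram_state == VRAMState.HIGH_VRAM:
        return get_torch_device()
    return torch.device("cpu")

print(unet_offload_device(VRAMState.SHARED))     # cpu (kept on the GPU device before this commit)
print(unet_offload_device(VRAMState.HIGH_VRAM))  # the GPU device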