model_management.py 13.8 KB
Newer Older
1
2
import psutil
from enum import Enum
comfyanonymous's avatar
comfyanonymous committed
3
from comfy.cli_args import args
4
import torch
5

6
7
8
9
10
11
12
class VRAMState(Enum):
    # Memory-management strategies, roughly ordered by available VRAM.
    # Values are stable identifiers; do not renumber.
    CPU = 0          # run everything on the CPU
    NO_VRAM = 1      # almost no VRAM: offload aggressively via accelerate
    LOW_VRAM = 2     # limited VRAM: split the model between GPU and CPU
    NORMAL_VRAM = 3  # default: keep the active model on the GPU
    HIGH_VRAM = 4    # plenty of VRAM: models are never moved off the GPU
    MPS = 5          # Apple Metal (MPS) backend
13

14
15
16
# Determine VRAM State
vram_state = VRAMState.NORMAL_VRAM   # effective state used at runtime
set_vram_to = VRAMState.NORMAL_VRAM  # requested state; applied later only if accelerate imports

total_vram = 0

lowvram_available = True
xpu_available = False

directml_enabled = False
if args.directml is not None:
    import torch_directml
    directml_enabled = True
    device_index = args.directml
    if device_index < 0:
        # Negative index: let torch_directml pick its default device.
        directml_device = torch_directml.device()
    else:
        directml_device = torch_directml.device(device_index)
    print("Using directml with device:", torch_directml.device_name(device_index))
    # torch_directml.disable_tiled_resources(True)
    lowvram_available = False #TODO: need to find a way to get free memory in directml before this can be enabled by default.

try:
    # Intel GPU support: importing IPEX registers the torch.xpu backend.
    import intel_extension_for_pytorch as ipex
    if torch.xpu.is_available():
        xpu_available = True
except:
    pass
def get_torch_device():
    """Return the torch device inference should run on.

    Priority: DirectML (when enabled) > MPS > CPU > XPU > current CUDA device.
    """
    global xpu_available
    global directml_enabled
    if directml_enabled:
        global directml_device
        return directml_device
    if vram_state == VRAMState.MPS:
        return torch.device("mps")
    if vram_state == VRAMState.CPU:
        return torch.device("cpu")
    if xpu_available:
        return torch.device("xpu")
    return torch.device(torch.cuda.current_device())

def get_total_memory(dev=None, torch_total_too=False):
    """Total memory in bytes for *dev* (default: the active torch device).

    Returns a single int, or a ``(total, total_reserved_by_torch)`` pair
    when *torch_total_too* is true.
    """
    global xpu_available
    global directml_enabled
    if dev is None:
        dev = get_torch_device()

    if hasattr(dev, 'type') and dev.type in ('cpu', 'mps'):
        # System RAM plays the role of VRAM for cpu/mps devices.
        mem_total = psutil.virtual_memory().total
        mem_total_torch = mem_total
    elif directml_enabled:
        mem_total = 1024 * 1024 * 1024 #TODO
        mem_total_torch = mem_total
    elif xpu_available:
        mem_total = torch.xpu.get_device_properties(dev).total_memory
        mem_total_torch = mem_total
    else:
        stats = torch.cuda.memory_stats(dev)
        mem_total_torch = stats['reserved_bytes.all.current']
        _, mem_total = torch.cuda.mem_get_info(dev)

    if torch_total_too:
        return (mem_total, mem_total_torch)
    return mem_total

total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)  # MB
total_ram = psutil.virtual_memory().total / (1024 * 1024)          # MB
print("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
if not args.normalvram and not args.cpu:
    if lowvram_available and total_vram <= 4096:
        print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
        set_vram_to = VRAMState.LOW_VRAM
    elif total_vram > total_ram * 1.1 and total_vram > 14336:
        # More VRAM than system RAM (10% slack) and at least ~14GB: keep models resident.
        print("Enabling highvram mode because your GPU has more vram than your computer has ram. If you don't want this use: --normalvram")
        vram_state = VRAMState.HIGH_VRAM

try:
    OOM_EXCEPTION = torch.cuda.OutOfMemoryError
except:
    # Older torch builds don't expose torch.cuda.OutOfMemoryError.
    OOM_EXCEPTION = Exception

XFORMERS_VERSION = ""
XFORMERS_ENABLED_VAE = True
if args.disable_xformers:
    XFORMERS_IS_AVAILABLE = False
else:
    try:
        import xformers
        import xformers.ops
        XFORMERS_IS_AVAILABLE = True
        try:
            XFORMERS_VERSION = xformers.version.__version__
            print("xformers version:", XFORMERS_VERSION)
            if XFORMERS_VERSION.startswith("0.0.18"):
                print()
                print("WARNING: This version of xformers has a major bug where you will get black images when generating high resolution images.")
                print("Please downgrade or upgrade xformers to a different version.")
                print()
                XFORMERS_ENABLED_VAE = False
        except:
            pass
    except:
        XFORMERS_IS_AVAILABLE = False

ENABLE_PYTORCH_ATTENTION = args.use_pytorch_cross_attention
if ENABLE_PYTORCH_ATTENTION:
    torch.backends.cuda.enable_math_sdp(True)
    torch.backends.cuda.enable_flash_sdp(True)
    torch.backends.cuda.enable_mem_efficient_sdp(True)
    # PyTorch SDP attention supersedes xformers when explicitly requested.
    XFORMERS_IS_AVAILABLE = False

if args.lowvram:
    set_vram_to = VRAMState.LOW_VRAM
    lowvram_available = True
elif args.novram:
    set_vram_to = VRAMState.NO_VRAM
elif args.highvram:
    vram_state = VRAMState.HIGH_VRAM

FORCE_FP32 = False
if args.force_fp32:
    print("Forcing FP32, if this improves things please report it.")
    FORCE_FP32 = True

if lowvram_available:
    try:
        # Low/no-vram modes depend on accelerate's dispatch machinery.
        import accelerate
        if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
            vram_state = set_vram_to
    except Exception as e:
        import traceback
        print(traceback.format_exc())
        print("ERROR: LOW VRAM MODE NEEDS accelerate.")
        lowvram_available = False

try:
    if torch.backends.mps.is_available():
        vram_state = VRAMState.MPS
except:
    pass

if args.cpu:
    vram_state = VRAMState.CPU

print(f"Set vram state to: {vram_state.name}")

171
172
def get_torch_device_name(device):
    """Human-readable name for *device*, used in the startup log line."""
    if not hasattr(device, 'type'):
        # Bare index / string: assume it refers to a CUDA device.
        return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device))
    if device.type == "cuda":
        return "{} {}".format(device, torch.cuda.get_device_name(device))
    return "{}".format(device.type)
179
180

try:
    print("Device:", get_torch_device_name(get_torch_device()))
except:
    print("Could not pick default device.")

# The model currently resident on the compute device (None when unloaded).
current_loaded_model = None
# ControlNet models currently moved onto the GPU.
current_gpu_controlnets = []

# True while current_loaded_model is managed by accelerate hooks (low/no-vram dispatch).
model_accelerated = False
def unload_model():
    """Move the currently loaded model (and GPU controlnets) off the GPU.

    On HIGH_VRAM the weights stay on the device; patches are still reverted.
    Safe to call when nothing is loaded.
    """
    global current_loaded_model
    global model_accelerated
    global current_gpu_controlnets
    global vram_state
    if current_loaded_model is not None:
        if model_accelerated:
            # Remove the accelerate offload hooks installed by load_model_gpu().
            accelerate.hooks.remove_hook_from_submodules(current_loaded_model.model)
            model_accelerated = False

        #never unload models from GPU on high vram
        if vram_state != VRAMState.HIGH_VRAM:
            current_loaded_model.model.cpu()
            current_loaded_model.model_patches_to("cpu")
        current_loaded_model.unpatch_model()
        current_loaded_model = None

    if vram_state != VRAMState.HIGH_VRAM:
        if len(current_gpu_controlnets) > 0:
            for n in current_gpu_controlnets:
                n.cpu()
            current_gpu_controlnets = []


def load_model_gpu(model):
    """Patch *model* and place it on the compute device; return the loaded model.

    Any previously loaded model is unloaded first. Depending on the VRAM
    state (possibly downgraded to LOW_VRAM for this model if it does not
    fit in free memory) the model is either fully moved to the device or
    dispatched with accelerate so only part of it resides in VRAM.
    """
    global current_loaded_model
    global vram_state
    global model_accelerated

    if model is current_loaded_model:
        return
    unload_model()
    try:
        real_model = model.patch_model()
    except Exception as e:
        # Roll back partially applied patches before propagating.
        model.unpatch_model()
        raise e

    torch_dev = get_torch_device()
    model.model_patches_to(torch_dev)

    vram_set_state = vram_state
    if lowvram_available and (vram_set_state == VRAMState.LOW_VRAM or vram_set_state == VRAMState.NORMAL_VRAM):
        model_size = model.model_size()
        current_free_mem = get_free_memory(torch_dev)
        # GPU budget for lowvram dispatch: at least 256MB, otherwise ~(free - 1GB) with headroom.
        lowvram_model_memory = int(max(256 * (1024 * 1024), (current_free_mem - 1024 * (1024 * 1024)) / 1.3 ))
        if model_size > (current_free_mem - (512 * 1024 * 1024)): #only switch to lowvram if really necessary
            vram_set_state = VRAMState.LOW_VRAM

    current_loaded_model = model

    if vram_set_state == VRAMState.CPU:
        pass
    elif vram_set_state == VRAMState.MPS:
        mps_device = torch.device("mps")
        real_model.to(mps_device)
        pass
    elif vram_set_state == VRAMState.NORMAL_VRAM or vram_set_state == VRAMState.HIGH_VRAM:
        model_accelerated = False
        real_model.to(get_torch_device())
    else:
        # Low/no-vram: let accelerate split the model between GPU and CPU.
        if vram_set_state == VRAMState.NO_VRAM:
            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"})
        elif vram_set_state == VRAMState.LOW_VRAM:
            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(lowvram_model_memory // (1024 * 1024)), "cpu": "16GiB"})

        accelerate.dispatch_model(real_model, device_map=device_map, main_device=get_torch_device())
        model_accelerated = True
    return current_loaded_model
262

263
def load_controlnet_gpu(control_models):
    """Move *control_models* onto the compute device.

    No-op on CPU; in low/no-vram modes the models are only flagged for
    lowvram handling and loaded lazily at run time instead.
    """
    global current_gpu_controlnets
    global vram_state
    if vram_state == VRAMState.CPU:
        return

    if vram_state == VRAMState.LOW_VRAM or vram_state == VRAMState.NO_VRAM:
        for m in control_models:
            if hasattr(m, 'set_lowvram'):
                m.set_lowvram(True)
        #don't load controlnets like this if low vram because they will be loaded right before running and unloaded right after
        return

    models = []
    for m in control_models:
        models += m.get_models()

    # Evict previously loaded controlnets that are no longer wanted.
    for m in current_gpu_controlnets:
        if m not in models:
            m.cpu()

    device = get_torch_device()
    current_gpu_controlnets = []
    for m in models:
        current_gpu_controlnets.append(m.to(device))

289

290
291
def load_if_low_vram(model):
    """Move *model* to the compute device only when in a low/no-vram mode."""
    global vram_state
    if vram_state in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
        return model.to(get_torch_device())
    return model

def unload_if_low_vram(model):
    """Move *model* back to the CPU only when in a low/no-vram mode."""
    global vram_state
    if vram_state not in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
        return model
    return model.cpu()

302
303
304
305
def get_autocast_device(dev):
    """Device-type string for torch.autocast; bare values default to "cuda"."""
    try:
        return dev.type
    except AttributeError:
        return "cuda"
306

307

308
def xformers_enabled():
    """Whether xformers attention should be used on the current device."""
    global xpu_available
    global directml_enabled
    if vram_state == VRAMState.CPU:
        return False
    # xformers only supports CUDA-style devices.
    if xpu_available or directml_enabled:
        return False
    return XFORMERS_IS_AVAILABLE
318

319
320
321
322
323

def xformers_enabled_vae():
    """Whether xformers may be used for the VAE (off for the buggy 0.0.18)."""
    if xformers_enabled():
        return XFORMERS_ENABLED_VAE
    return False
326

327
def pytorch_attention_enabled():
    """True when PyTorch SDP cross-attention was requested via CLI flag."""
    # Read-only access: no `global` statement needed.
    return ENABLE_PYTORCH_ATTENTION

331
332
333
334
335
336
337
338
def pytorch_attention_flash_attention():
    """True when PyTorch SDP attention is enabled and flash attention is usable."""
    #TODO: more reliable way of checking for flash attention?
    if not ENABLE_PYTORCH_ATTENTION:
        return False
    #pytorch flash attention only works on Nvidia
    return True if torch.version.cuda else False

339
def get_free_memory(dev=None, torch_free_too=False):
    """Free memory in bytes on *dev* (default: the active torch device).

    Returns a single int, or a ``(free_total, free_in_torch_pool)`` pair
    when *torch_free_too* is true.
    """
    global xpu_available
    global directml_enabled
    if dev is None:
        dev = get_torch_device()

    if hasattr(dev, 'type') and dev.type in ('cpu', 'mps'):
        mem_free_total = psutil.virtual_memory().available
        mem_free_torch = mem_free_total
    elif directml_enabled:
        mem_free_total = 1024 * 1024 * 1024 #TODO
        mem_free_torch = mem_free_total
    elif xpu_available:
        props = torch.xpu.get_device_properties(dev)
        mem_free_total = props.total_memory - torch.xpu.memory_allocated(dev)
        mem_free_torch = mem_free_total
    else:
        stats = torch.cuda.memory_stats(dev)
        mem_active = stats['active_bytes.all.current']
        mem_reserved = stats['reserved_bytes.all.current']
        mem_free_cuda, _ = torch.cuda.mem_get_info(dev)
        # Memory torch reserved but isn't actively using can be reclaimed.
        mem_free_torch = mem_reserved - mem_active
        mem_free_total = mem_free_cuda + mem_free_torch

    if torch_free_too:
        return (mem_free_total, mem_free_torch)
    return mem_free_total
367
368
369

def maximum_batch_area():
    """Estimate the largest latent area that should fit in free memory."""
    global vram_state
    if vram_state == VRAMState.NO_VRAM:
        return 0

    memory_free = get_free_memory() / (1024 * 1024)
    if xformers_enabled() or pytorch_attention_flash_attention():
        # Memory-efficient attention scales roughly linearly with free memory.
        #TODO: this needs to be tweaked
        area = memory_free * 20
    else:
        #TODO: this formula is because AMD sucks and has memory management issues which might be fixed in the future
        area = 0.9 * (memory_free - 1024) / (0.6)
    return int(max(area, 0))
381
382
383

def cpu_mode():
    """True when everything runs on the CPU."""
    return vram_state == VRAMState.CPU
385

Yurii Mazurevich's avatar
Yurii Mazurevich committed
386
387
def mps_mode():
    """True when running on Apple's MPS backend."""
    return vram_state == VRAMState.MPS
Yurii Mazurevich's avatar
Yurii Mazurevich committed
389

390
def should_use_fp16():
    """Decide whether fp16 inference should be used on the current setup."""
    global xpu_available
    global directml_enabled

    if FORCE_FP32:
        return False

    if directml_enabled:
        return False

    if cpu_mode() or mps_mode() or xpu_available:
        return False #TODO ?

    # Devices reporting bf16 support are assumed to handle fp16 well.
    if torch.cuda.is_bf16_supported():
        return True

    props = torch.cuda.get_device_properties("cuda")
    if props.major < 7:
        # Older compute capability: stick with fp32.
        return False

    #FP32 is faster on those cards?
    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600"]
    if any(series in props.name for series in nvidia_16_series):
        return False

    return True

418
419
def soft_empty_cache():
    """Ask the active backend to release cached allocator memory."""
    global xpu_available
    global vram_state
    if vram_state == VRAMState.MPS:
        torch.mps.empty_cache()
    elif xpu_available:
        torch.xpu.empty_cache()
    elif torch.cuda.is_available() and torch.version.cuda:
        #This seems to make things worse on ROCm so I only do it for cuda
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
#TODO: might be cleaner to put this somewhere else
import threading

class InterruptProcessingException(Exception):
    """Raised from inside a job to abort the current processing run."""
    pass

# Protects the shared interrupt flag; it may be toggled from another thread.
interrupt_processing_mutex = threading.RLock()

interrupt_processing = False

def interrupt_current_processing(value=True):
    """Request (value=True) or withdraw (value=False) an interrupt."""
    global interrupt_processing
    with interrupt_processing_mutex:
        interrupt_processing = value

def processing_interrupted():
    """Peek at the pending-interrupt flag without clearing it."""
    with interrupt_processing_mutex:
        return interrupt_processing

def throw_exception_if_processing_interrupted():
    """Consume a pending interrupt by raising InterruptProcessingException."""
    global interrupt_processing
    with interrupt_processing_mutex:
        if not interrupt_processing:
            return
        # Clear the flag so the interrupt fires only once.
        interrupt_processing = False
        raise InterruptProcessingException()