# model_management.py
1
import psutil
2
import logging
3
from enum import Enum
comfyanonymous's avatar
comfyanonymous committed
4
from comfy.cli_args import args
comfyanonymous's avatar
comfyanonymous committed
5
import comfy.utils
6
import torch
comfyanonymous's avatar
comfyanonymous committed
7
import sys
8

9
class VRAMState(Enum):
    """How aggressively models must be shuffled between system RAM and VRAM."""
    DISABLED = 0    #No vram present: no need to move models to vram
    NO_VRAM = 1     #Very low vram: enable all the options to save vram
    LOW_VRAM = 2    #Not enough vram for full models: load them partially (lowvram patching).
    NORMAL_VRAM = 3 #Default: load/unload models on demand.
    HIGH_VRAM = 4   #Plenty of vram: keep models resident on the GPU.
    SHARED = 5      #No dedicated vram: memory shared between CPU and GPU but models still need to be moved between both.

class CPUState(Enum):
    """Which compute backend this process drives."""
    GPU = 0  # default; covers CUDA/ROCm and Intel XPU (see is_intel_xpu)
    CPU = 1  # forced CPU execution (--cpu)
    MPS = 2  # Apple Metal (torch.backends.mps)
21

22
23
24
# Determine VRAM State
vram_state = VRAMState.NORMAL_VRAM
set_vram_to = VRAMState.NORMAL_VRAM
cpu_state = CPUState.GPU

total_vram = 0

lowvram_available = True
xpu_available = False

if args.deterministic:
    logging.info("Using deterministic algorithms for pytorch")
    torch.use_deterministic_algorithms(True, warn_only=True)

directml_enabled = False
if args.directml is not None:
    import torch_directml
    directml_enabled = True
    device_index = args.directml
    # A negative index means "let torch_directml pick the default device".
    if device_index < 0:
        directml_device = torch_directml.device()
    else:
        directml_device = torch_directml.device(device_index)
    logging.info("Using directml with device: {}".format(torch_directml.device_name(device_index)))
    # torch_directml.disable_tiled_resources(True)
    lowvram_available = False #TODO: need to find a way to get free memory in directml before this can be enabled by default.

# Probe for Intel XPU support (IPEX); silently fall through when absent.
try:
    import intel_extension_for_pytorch as ipex
    if torch.xpu.is_available():
        xpu_available = True
except:
    pass

# Probe for Apple MPS; silently fall through on non-Mac builds of torch.
try:
    if torch.backends.mps.is_available():
        cpu_state = CPUState.MPS
        import torch.mps
except:
    pass

if args.cpu:
    cpu_state = CPUState.CPU

66
67
def is_intel_xpu():
    """Return True when running in GPU mode on an Intel XPU (IPEX detected at import)."""
    global cpu_state
    global xpu_available
    if cpu_state != CPUState.GPU:
        return False
    return xpu_available

def get_torch_device():
    """Return the main compute torch.device based on the backend detected at import.

    Priority: DirectML > MPS > forced CPU > Intel XPU > CUDA (current device).
    """
    global directml_enabled
    global cpu_state
    if directml_enabled:
        global directml_device
        return directml_device
    if cpu_state == CPUState.MPS:
        return torch.device("mps")
    if cpu_state == CPUState.CPU:
        return torch.device("cpu")
    else:
        if is_intel_xpu():
            return torch.device("xpu", torch.xpu.current_device())
        else:
            return torch.device(torch.cuda.current_device())

def get_total_memory(dev=None, torch_total_too=False):
    """Total memory in bytes of device *dev* (default: main compute device).

    Returns the device's total memory, or a tuple
    ``(mem_total, mem_total_torch)`` when torch_total_too is True, where the
    second value is the amount currently reserved by the torch allocator
    (equal to mem_total on cpu/mps/directml where no split is available).
    """
    global directml_enabled
    if dev is None:
        dev = get_torch_device()

    if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
        # cpu/mps share system RAM.
        mem_total = psutil.virtual_memory().total
        mem_total_torch = mem_total
    else:
        if directml_enabled:
            mem_total = 1024 * 1024 * 1024 #TODO
            mem_total_torch = mem_total
        elif is_intel_xpu():
            stats = torch.xpu.memory_stats(dev)
            mem_reserved = stats['reserved_bytes.all.current']
            mem_total_torch = mem_reserved
            mem_total = torch.xpu.get_device_properties(dev).total_memory
        else:
            stats = torch.cuda.memory_stats(dev)
            mem_reserved = stats['reserved_bytes.all.current']
            _, mem_total_cuda = torch.cuda.mem_get_info(dev)
            mem_total_torch = mem_reserved
            mem_total = mem_total_cuda

    if torch_total_too:
        return (mem_total, mem_total_torch)
    else:
        return mem_total

# Log the machine's memory budget once at import (values in MB).
total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
total_ram = psutil.virtual_memory().total / (1024 * 1024)
logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))

# Older torch builds may not expose torch.cuda.OutOfMemoryError.
try:
    OOM_EXCEPTION = torch.cuda.OutOfMemoryError
except:
    OOM_EXCEPTION = Exception

128
129
# Detect xformers availability and known-bad versions.
XFORMERS_VERSION = ""
XFORMERS_ENABLED_VAE = True
if args.disable_xformers:
    XFORMERS_IS_AVAILABLE = False
else:
    try:
        import xformers
        import xformers.ops
        XFORMERS_IS_AVAILABLE = True
        try:
            # Some builds are python-only and lack the C++ kernels.
            XFORMERS_IS_AVAILABLE = xformers._has_cpp_library
        except:
            pass
        try:
            XFORMERS_VERSION = xformers.version.__version__
            logging.info("xformers version: {}".format(XFORMERS_VERSION))
            if XFORMERS_VERSION.startswith("0.0.18"):
                logging.warning("\nWARNING: This version of xformers has a major bug where you will get black images when generating high resolution images.")
                logging.warning("Please downgrade or upgrade xformers to a different version.\n")
                # Keep xformers for attention but disable it in the VAE.
                XFORMERS_ENABLED_VAE = False
        except:
            pass
    except:
        XFORMERS_IS_AVAILABLE = False
152

153
154
155
156
157
def is_nvidia():
    """Return True when running in GPU mode on a CUDA (Nvidia) build of torch."""
    global cpu_state
    if cpu_state != CPUState.GPU:
        return False
    if torch.version.cuda:
        return True
    return False
159

160
161
162
163
164
# Select the attention implementation and VAE dtype.
ENABLE_PYTORCH_ATTENTION = False
if args.use_pytorch_cross_attention:
    ENABLE_PYTORCH_ATTENTION = True
    XFORMERS_IS_AVAILABLE = False

VAE_DTYPE = torch.float32

try:
    if is_nvidia():
        torch_version = torch.version.__version__
        # Pytorch >= 2 has a usable scaled_dot_product_attention.
        if int(torch_version[0]) >= 2:
            if ENABLE_PYTORCH_ATTENTION == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
                ENABLE_PYTORCH_ATTENTION = True
            # Ampere (sm_80) and newer run the VAE in bf16.
            if torch.cuda.is_bf16_supported() and torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8:
                VAE_DTYPE = torch.bfloat16
    if is_intel_xpu():
        if args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
            ENABLE_PYTORCH_ATTENTION = True
except:
    pass

if is_intel_xpu():
    VAE_DTYPE = torch.bfloat16

if args.cpu_vae:
    VAE_DTYPE = torch.float32

# Explicit CLI overrides take precedence over the autodetected VAE dtype.
if args.fp16_vae:
    VAE_DTYPE = torch.float16
elif args.bf16_vae:
    VAE_DTYPE = torch.bfloat16
elif args.fp32_vae:
    VAE_DTYPE = torch.float32


if ENABLE_PYTORCH_ATTENTION:
    torch.backends.cuda.enable_math_sdp(True)
    torch.backends.cuda.enable_flash_sdp(True)
    torch.backends.cuda.enable_mem_efficient_sdp(True)
199

200
201
# Resolve the final vram_state from CLI flags and the detected backend.
if args.lowvram:
    set_vram_to = VRAMState.LOW_VRAM
    lowvram_available = True
elif args.novram:
    set_vram_to = VRAMState.NO_VRAM
elif args.highvram or args.gpu_only:
    vram_state = VRAMState.HIGH_VRAM

FORCE_FP32 = False
FORCE_FP16 = False
if args.force_fp32:
    logging.info("Forcing FP32, if this improves things please report it.")
    FORCE_FP32 = True

if args.force_fp16:
    logging.info("Forcing FP16.")
    FORCE_FP16 = True

# Low/no-vram requests only apply when the backend supports partial loading.
if lowvram_available:
    if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
        vram_state = set_vram_to

if cpu_state != CPUState.GPU:
    vram_state = VRAMState.DISABLED

if cpu_state == CPUState.MPS:
    vram_state = VRAMState.SHARED

logging.info(f"Set vram state to: {vram_state.name}")

DISABLE_SMART_MEMORY = args.disable_smart_memory

if DISABLE_SMART_MEMORY:
    logging.info("Disabling smart memory management")
235

236
237
def get_torch_device_name(device):
    """Human-readable description of *device* for startup logging."""
    if not hasattr(device, 'type'):
        # Not a torch.device-like object: describe by detected backend.
        if is_intel_xpu():
            return "{} {}".format(device, torch.xpu.get_device_name(device))
        return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device))
    if device.type != "cuda":
        return "{}".format(device.type)
    try:
        allocator_backend = torch.cuda.get_allocator_backend()
    except:
        allocator_backend = ""
    return "{} {} : {}".format(device, torch.cuda.get_device_name(device), allocator_backend)
250
251

# Best-effort startup log of the selected device; never fatal.
try:
    logging.info("Device: {}".format(get_torch_device_name(get_torch_device())))
except:
    logging.warning("Could not pick default device.")

logging.info("VAE dtype: {}".format(VAE_DTYPE))

# LoadedModel instances currently resident, most recently loaded first.
current_loaded_models = []
259

260
261
262
263
264
265
266
267
def module_size(module):
    """Total size in bytes of every tensor in *module*'s state dict."""
    state = module.state_dict()
    return sum(t.nelement() * t.element_size() for t in state.values())

comfyanonymous's avatar
comfyanonymous committed
268
269
270
271
class LoadedModel:
    """Tracks one model patcher's load state on its target device.

    Wraps a model-patcher object (presumably comfy's ModelPatcher — it must
    provide load_device, model_size, patch_model*, unpatch_model, etc.).
    """
    def __init__(self, model):
        self.model = model
        self.device = model.load_device
        # True once weights have been patched onto the target device.
        self.weights_loaded = False
        # The underlying torch module after patching; None while unloaded.
        self.real_model = None

    def model_memory(self):
        """Size in bytes of the wrapped model."""
        return self.model.model_size()

    def model_memory_required(self, device):
        """Bytes that loading onto *device* would consume (0 if already there)."""
        if device == self.model.current_device:
            return 0
        else:
            return self.model_memory()

    def model_load(self, lowvram_model_memory=0, force_patch_weights=False):
        """Patch the model onto its device and return the real torch module.

        lowvram_model_memory > 0 selects partial (lowvram) loading with that
        memory budget. On any failure the model is unpatched and unloaded
        before the exception is re-raised.
        """
        patch_model_to = self.device

        self.model.model_patches_to(self.device)
        self.model.model_patches_to(self.model.model_dtype())

        # Only re-patch weights when they are not already loaded.
        load_weights = not self.weights_loaded

        try:
            if lowvram_model_memory > 0 and load_weights:
                self.real_model = self.model.patch_model_lowvram(device_to=patch_model_to, lowvram_model_memory=lowvram_model_memory, force_patch_weights=force_patch_weights)
            else:
                self.real_model = self.model.patch_model(device_to=patch_model_to, patch_weights=load_weights)
        except Exception as e:
            self.model.unpatch_model(self.model.offload_device)
            self.model_unload()
            raise e

        if is_intel_xpu() and not args.disable_ipex_optimize:
            self.real_model = ipex.optimize(self.real_model.eval(), graph_mode=True, concat_linear=True)

        self.weights_loaded = True
        return self.real_model

    def should_reload_model(self, force_patch_weights=False):
        """True when a full reload is needed to force-patch lowvram weights."""
        if force_patch_weights and self.model.lowvram_patch_counter > 0:
            return True
        return False

    def model_unload(self, unpatch_weights=True):
        """Move the model back to its offload device, optionally keeping patched weights."""
        self.model.unpatch_model(self.model.offload_device, unpatch_weights=unpatch_weights)
        self.model.model_patches_to(self.model.offload_device)
        self.weights_loaded = self.weights_loaded and not unpatch_weights
        self.real_model = None

    def __eq__(self, other):
        # Identity of the wrapped patcher, not structural equality.
        return self.model is other.model
comfyanonymous's avatar
comfyanonymous committed
321

comfyanonymous's avatar
comfyanonymous committed
322
323
324
def minimum_inference_memory():
    """Bytes always reserved for inference on top of model weights: 1 GiB."""
    return 1024 * 1024 * 1024

325
def unload_model_clones(model, unload_weights_only=True, force_unload=True):
    """Unload every loaded clone of *model* from current_loaded_models.

    Returns True when no clones were found, None when force_unload is False
    and the weights-only unload was skipped, otherwise whether the clones'
    weights were actually unpatched (False when every clone shares identical
    weights with *model*).
    """
    # Collect clone indices in descending order so pops don't shift later ones.
    to_unload = []
    for i in range(len(current_loaded_models)):
        if model.is_clone(current_loaded_models[i].model):
            to_unload = [i] + to_unload

    if len(to_unload) == 0:
        return True

    same_weights = 0
    for i in to_unload:
        if model.clone_has_same_weights(current_loaded_models[i].model):
            same_weights += 1

    # Weights can stay loaded only when every clone has identical weights.
    if same_weights == len(to_unload):
        unload_weight = False
    else:
        unload_weight = True

    if not force_unload:
        if unload_weights_only and unload_weight == False:
            return None

    for i in to_unload:
        logging.debug("unload clone {} {}".format(i, unload_weight))
        current_loaded_models.pop(i).model_unload(unpatch_weights=unload_weight)

    return unload_weight
comfyanonymous's avatar
comfyanonymous committed
353
354

def free_memory(memory_required, device, keep_loaded=[]):
    """Unload models on *device* until memory_required bytes are free.

    Models in keep_loaded are never unloaded. With smart memory enabled,
    unloading stops as soon as enough memory is free; candidates are tried
    lowest-refcount / smallest first. keep_loaded is only read, so the
    mutable default is safe here.
    """
    unloaded_model = []
    can_unload = []

    # Walk backwards so the least recently loaded models sort first on ties.
    for i in range(len(current_loaded_models) -1, -1, -1):
        shift_model = current_loaded_models[i]
        if shift_model.device == device:
            if shift_model not in keep_loaded:
                can_unload.append((sys.getrefcount(shift_model.model), shift_model.model_memory(), i))

    for x in sorted(can_unload):
        i = x[-1]
        if not DISABLE_SMART_MEMORY:
            if get_free_memory(device) > memory_required:
                break
        current_loaded_models[i].model_unload()
        unloaded_model.append(i)

    # Pop highest indices first so earlier indices stay valid.
    for i in sorted(unloaded_model, reverse=True):
        current_loaded_models.pop(i)

    if len(unloaded_model) > 0:
        soft_empty_cache()
    else:
        if vram_state != VRAMState.HIGH_VRAM:
            mem_free_total, mem_free_torch = get_free_memory(device, torch_free_too=True)
            # The torch allocator is hoarding a large share: release its cache.
            if mem_free_torch > mem_free_total * 0.25:
                soft_empty_cache()
comfyanonymous's avatar
comfyanonymous committed
382

383
def load_models_gpu(models, memory_required=0, force_patch_weights=False):
    """Ensure every model in *models* is loaded on its device, freeing memory as needed.

    memory_required is extra working memory (bytes) to keep free on top of
    the model weights; at least minimum_inference_memory() is reserved.
    """
    global vram_state

    inference_memory = minimum_inference_memory()
    extra_mem = max(inference_memory, memory_required)

    # Deduplicate; LoadedModel.__eq__ compares the wrapped patcher identity.
    models = set(models)

    models_to_load = []
    models_already_loaded = []
    for x in models:
        loaded_model = LoadedModel(x)
        loaded = None

        try:
            loaded_model_index = current_loaded_models.index(loaded_model)
        except:
            loaded_model_index = None

        if loaded_model_index is not None:
            loaded = current_loaded_models[loaded_model_index]
            if loaded.should_reload_model(force_patch_weights=force_patch_weights): #TODO: cleanup this model reload logic
                current_loaded_models.pop(loaded_model_index).model_unload(unpatch_weights=True)
                loaded = None
            else:
                models_already_loaded.append(loaded)

        if loaded is None:
            if hasattr(x, "model"):
                logging.info(f"Requested to load {x.model.__class__.__name__}")
            models_to_load.append(loaded_model)

    if len(models_to_load) == 0:
        # Nothing new to load: still make room for the requested working memory.
        devs = set(map(lambda a: a.device, models_already_loaded))
        for d in devs:
            if d != torch.device("cpu"):
                free_memory(extra_mem, d, models_already_loaded)
        return

    logging.info(f"Loading {len(models_to_load)} new model{'s' if len(models_to_load) > 1 else ''}")

    total_memory_required = {}
    for loaded_model in models_to_load:
        if unload_model_clones(loaded_model.model, unload_weights_only=True, force_unload=False) == True:#unload clones where the weights are different
            total_memory_required[loaded_model.device] = total_memory_required.get(loaded_model.device, 0) + loaded_model.model_memory_required(loaded_model.device)

    # 1.3x headroom over the raw weight size plus the working memory.
    for device in total_memory_required:
        if device != torch.device("cpu"):
            free_memory(total_memory_required[device] * 1.3 + extra_mem, device, models_already_loaded)

    for loaded_model in models_to_load:
        weights_unloaded = unload_model_clones(loaded_model.model, unload_weights_only=False, force_unload=False) #unload the rest of the clones where the weights can stay loaded
        if weights_unloaded is not None:
            loaded_model.weights_loaded = not weights_unloaded

    for loaded_model in models_to_load:
        model = loaded_model.model
        torch_dev = model.load_device
        if is_device_cpu(torch_dev):
            vram_set_state = VRAMState.DISABLED
        else:
            vram_set_state = vram_state
        lowvram_model_memory = 0
        if lowvram_available and (vram_set_state == VRAMState.LOW_VRAM or vram_set_state == VRAMState.NORMAL_VRAM):
            model_size = loaded_model.model_memory_required(torch_dev)
            current_free_mem = get_free_memory(torch_dev)
            # Lowvram budget: free memory minus 1 GiB, de-rated by 1.3, floor 64 MiB.
            lowvram_model_memory = int(max(64 * (1024 * 1024), (current_free_mem - 1024 * (1024 * 1024)) / 1.3 ))
            if model_size <= (current_free_mem - inference_memory): #only switch to lowvram if really necessary
                lowvram_model_memory = 0

        if vram_set_state == VRAMState.NO_VRAM:
            lowvram_model_memory = 64 * 1024 * 1024

        cur_loaded_model = loaded_model.model_load(lowvram_model_memory, force_patch_weights=force_patch_weights)
        # Most recently loaded first.
        current_loaded_models.insert(0, loaded_model)
    return


def load_model_gpu(model):
    """Convenience wrapper: load a single model via load_models_gpu."""
    return load_models_gpu([model])

464
def cleanup_models(keep_clone_weights_loaded=False):
    """Drop loaded models whose patchers are no longer referenced anywhere else.

    Uses sys.getrefcount thresholds to detect that only this module still
    holds the model, which is fragile by nature (see TODO below).
    """
    to_delete = []
    for i in range(len(current_loaded_models)):
        # <= 2: the list entry plus getrefcount's own temporary reference.
        if sys.getrefcount(current_loaded_models[i].model) <= 2:
            if not keep_clone_weights_loaded:
                to_delete = [i] + to_delete
            #TODO: find a less fragile way to do this.
            elif sys.getrefcount(current_loaded_models[i].real_model) <= 3: #references from .real_model + the .model
                to_delete = [i] + to_delete

    # Indices were collected in descending order so pops stay valid.
    for i in to_delete:
        x = current_loaded_models.pop(i)
        x.model_unload()
        del x
478

479
480
481
482
def dtype_size(dtype):
    dtype_size = 4
    if dtype == torch.float16 or dtype == torch.bfloat16:
        dtype_size = 2
483
484
485
486
487
488
489
    elif dtype == torch.float32:
        dtype_size = 4
    else:
        try:
            dtype_size = dtype.itemsize
        except: #Old pytorch doesn't have .itemsize
            pass
490
491
    return dtype_size

492
def unet_offload_device():
    """Device the UNet is parked on when idle: stays on GPU only in high-vram mode."""
    if vram_state != VRAMState.HIGH_VRAM:
        return torch.device("cpu")
    return get_torch_device()

comfyanonymous's avatar
comfyanonymous committed
498
499
500
501
502
503
def unet_inital_load_device(parameters, dtype):
    """Pick the device to materialize a UNet with *parameters* weights of *dtype* on.

    Prefers the compute device when it has more free memory than the CPU and
    the model fits; high-vram mode always uses the compute device, and
    --disable-smart-memory always uses the CPU.
    """
    torch_dev = get_torch_device()
    if vram_state == VRAMState.HIGH_VRAM:
        return torch_dev

    cpu_dev = torch.device("cpu")
    if DISABLE_SMART_MEMORY:
        return cpu_dev

    model_size = dtype_size(dtype) * parameters

    mem_dev = get_free_memory(torch_dev)
    mem_cpu = get_free_memory(cpu_dev)
    if mem_dev > mem_cpu and model_size < mem_dev:
        return torch_dev
    else:
        return cpu_dev

comfyanonymous's avatar
comfyanonymous committed
516
def unet_dtype(device=None, model_params=0, supported_dtypes=[torch.float16, torch.bfloat16, torch.float32]):
    """Choose the dtype for UNet weights: CLI overrides first, then fp16/bf16
    autodetection restricted to *supported_dtypes*, else fp32.

    supported_dtypes is only read, so the mutable default is safe.
    """
    if args.bf16_unet:
        return torch.bfloat16
    if args.fp16_unet:
        return torch.float16
    if args.fp8_e4m3fn_unet:
        return torch.float8_e4m3fn
    if args.fp8_e5m2_unet:
        return torch.float8_e5m2
    if should_use_fp16(device=device, model_params=model_params, manual_cast=True):
        if torch.float16 in supported_dtypes:
            return torch.float16
    if should_use_bf16(device, model_params=model_params, manual_cast=True):
        if torch.bfloat16 in supported_dtypes:
            return torch.bfloat16
    return torch.float32

533
# None means no manual cast
def unet_manual_cast(weight_dtype, inference_device, supported_dtypes=[torch.float16, torch.bfloat16, torch.float32]):
    """Return the dtype to manually cast activations to, or None when the
    device computes natively in *weight_dtype*."""
    if weight_dtype == torch.float32:
        return None

    fp16_supported = should_use_fp16(inference_device, prioritize_performance=False)
    if fp16_supported and weight_dtype == torch.float16:
        return None

    bf16_supported = should_use_bf16(inference_device)
    if bf16_supported and weight_dtype == torch.bfloat16:
        return None

    # Weight dtype isn't natively supported: pick the best supported cast target.
    if fp16_supported and torch.float16 in supported_dtypes:
        return torch.float16
    elif bf16_supported and torch.bfloat16 in supported_dtypes:
        return torch.bfloat16
    else:
        return torch.float32

554
def text_encoder_offload_device():
    """Device the text encoder is parked on when idle (CPU unless --gpu-only)."""
    if not args.gpu_only:
        return torch.device("cpu")
    return get_torch_device()

560
def text_encoder_device():
    """Device the text encoder runs on.

    GPU with --gpu-only, or in normal/high-vram modes when fp16 is usable;
    otherwise CPU.
    """
    if args.gpu_only:
        return get_torch_device()
    elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM:
        if should_use_fp16(prioritize_performance=False):
            return get_torch_device()
        else:
            return torch.device("cpu")
    else:
        return torch.device("cpu")

571
572
573
574
575
576
577
578
579
580
def text_encoder_dtype(device=None):
    """Dtype for the text encoder: CLI override if given, otherwise fp16."""
    if args.fp8_e4m3fn_text_enc:
        return torch.float8_e4m3fn
    elif args.fp8_e5m2_text_enc:
        return torch.float8_e5m2
    elif args.fp16_text_enc:
        return torch.float16
    elif args.fp32_text_enc:
        return torch.float32

    # NOTE(review): this branch returns the same value as the fallthrough
    # below, so it is currently redundant — kept for clarity/future tuning.
    if is_device_cpu(device):
        return torch.float16

    return torch.float16

586

587
588
589
590
591
592
def intermediate_device():
    """Device used for intermediate tensors between pipeline stages."""
    return get_torch_device() if args.gpu_only else torch.device("cpu")

593
def vae_device():
    """Device the VAE runs on (CPU only with --cpu-vae)."""
    return torch.device("cpu") if args.cpu_vae else get_torch_device()

def vae_offload_device():
    """Device the VAE is parked on when idle (CPU unless --gpu-only)."""
    return get_torch_device() if args.gpu_only else torch.device("cpu")

604
def vae_dtype():
    """Return the VAE dtype selected at import time (see VAE_DTYPE above)."""
    global VAE_DTYPE
    return VAE_DTYPE
607

608
609
610
611
def get_autocast_device(dev):
    """Device-type string for torch.autocast; "cuda" for objects without a .type."""
    return getattr(dev, 'type', "cuda")
612

613
614
615
def supports_dtype(device, dtype): #TODO
    """Rough check whether *device* can compute in *dtype*."""
    if dtype == torch.float32:
        return True  # fp32 assumed available everywhere
    if is_device_cpu(device):
        return False  # half precision is not assumed usable on CPU here
    return dtype in (torch.float16, torch.bfloat16)

624
625
626
def device_supports_non_blocking(device):
    """Whether non_blocking transfers are safe on *device*. Currently always
    False: the True path is deliberately disabled (see TODO below)."""
    if is_device_mps(device):
        return False #pytorch bug? mps doesn't support non blocking
    return False
    # return True #TODO: figure out why this causes issues
629

630
631
632
633
634
635
636
def cast_to_device(tensor, device, dtype, copy=False):
    """Move *tensor* to *device* and cast to *dtype*, optionally forcing a copy.

    When the tensor's source dtype is safely castable on the target (fp32/fp16
    everywhere; bf16 only on cuda or Intel XPU) the move happens first and the
    cast second; otherwise a single combined to() call is used.
    """
    device_supports_cast = False
    if tensor.dtype == torch.float32 or tensor.dtype == torch.float16:
        device_supports_cast = True
    elif tensor.dtype == torch.bfloat16:
        if hasattr(device, 'type') and device.type.startswith("cuda"):
            device_supports_cast = True
        elif is_intel_xpu():
            device_supports_cast = True

    non_blocking = device_supports_non_blocking(device)

    if device_supports_cast:
        if copy:
            if tensor.device == device:
                # Already on the target: cast in place (with copy).
                return tensor.to(dtype, copy=copy, non_blocking=non_blocking)
            return tensor.to(device, copy=copy, non_blocking=non_blocking).to(dtype, non_blocking=non_blocking)
        else:
            return tensor.to(device, non_blocking=non_blocking).to(dtype, non_blocking=non_blocking)
    else:
        return tensor.to(device, dtype, copy=copy, non_blocking=non_blocking)
651

652
def xformers_enabled():
    """True when xformers attention can be used on the active backend."""
    global directml_enabled
    global cpu_state
    if cpu_state != CPUState.GPU:
        return False
    if is_intel_xpu() or directml_enabled:
        return False
    return XFORMERS_IS_AVAILABLE
662

663
664
665
666
667

def xformers_enabled_vae():
    """xformers for the VAE: requires xformers plus a version without the VAE bug."""
    if xformers_enabled():
        return XFORMERS_ENABLED_VAE
    return False
670

671
def pytorch_attention_enabled():
    """Whether pytorch scaled_dot_product_attention was selected at startup."""
    global ENABLE_PYTORCH_ATTENTION
    return ENABLE_PYTORCH_ATTENTION

675
676
677
678
def pytorch_attention_flash_attention():
    """True when pytorch attention is active and flash attention can kick in."""
    global ENABLE_PYTORCH_ATTENTION
    #TODO: more reliable way of checking for flash attention?
    # pytorch flash attention only works on Nvidia.
    return ENABLE_PYTORCH_ATTENTION and is_nvidia()

683
def get_free_memory(dev=None, torch_free_too=False):
    """Free memory in bytes on *dev* (default: main compute device).

    Returns the total free memory, or ``(mem_free_total, mem_free_torch)``
    when torch_free_too is True, where the second value is memory the torch
    allocator has reserved but is not actively using.
    """
    global directml_enabled
    if dev is None:
        dev = get_torch_device()

    if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
        # cpu/mps share system RAM.
        mem_free_total = psutil.virtual_memory().available
        mem_free_torch = mem_free_total
    else:
        if directml_enabled:
            mem_free_total = 1024 * 1024 * 1024 #TODO
            mem_free_torch = mem_free_total
        elif is_intel_xpu():
            stats = torch.xpu.memory_stats(dev)
            mem_active = stats['active_bytes.all.current']
            mem_reserved = stats['reserved_bytes.all.current']
            mem_free_torch = mem_reserved - mem_active
            mem_free_xpu = torch.xpu.get_device_properties(dev).total_memory - mem_reserved
            mem_free_total = mem_free_xpu + mem_free_torch
        else:
            stats = torch.cuda.memory_stats(dev)
            mem_active = stats['active_bytes.all.current']
            mem_reserved = stats['reserved_bytes.all.current']
            mem_free_cuda, _ = torch.cuda.mem_get_info(dev)
            mem_free_torch = mem_reserved - mem_active
            mem_free_total = mem_free_cuda + mem_free_torch

    if torch_free_too:
        return (mem_free_total, mem_free_torch)
    else:
        return mem_free_total
714

715
def cpu_mode():
    """True when execution was forced onto the CPU."""
    global cpu_state
    return cpu_state == CPUState.CPU
718

Yurii Mazurevich's avatar
Yurii Mazurevich committed
719
def mps_mode():
    """True when running on Apple MPS."""
    global cpu_state
    return cpu_state == CPUState.MPS
Yurii Mazurevich's avatar
Yurii Mazurevich committed
722

723
def is_device_type(device, type):
    """True when *device* is a torch.device-like object whose .type equals *type*."""
    if not hasattr(device, 'type'):
        return False
    return device.type == type

729
730
731
def is_device_cpu(device):
    """True when *device* is a cpu torch.device."""
    return is_device_type(device, 'cpu')

comfyanonymous's avatar
comfyanonymous committed
732
def is_device_mps(device):
    """True when *device* is an mps torch.device."""
    return is_device_type(device, 'mps')

def is_device_cuda(device):
    """True when *device* is a cuda torch.device."""
    return is_device_type(device, 'cuda')
737

738
def should_use_fp16(device=None, model_params=0, prioritize_performance=True, manual_cast=False):
    """Decide whether fp16 should be used for a model on *device*.

    model_params is the parameter count (used to check whether the fp32 model
    would even fit in free memory); with prioritize_performance=False, fp16 is
    accepted on cards where it works but is slower than fp32. manual_cast=True
    also accepts fp16 storage on cards that would need casting at compute time.
    """
    global directml_enabled

    if device is not None:
        if is_device_cpu(device):
            return False

    if FORCE_FP16:
        return True

    if device is not None:
        if is_device_mps(device):
            return True

    if FORCE_FP32:
        return False

    if directml_enabled:
        return False

    if mps_mode():
        return True

    if cpu_mode():
        return False

    if is_intel_xpu():
        return True

    if torch.version.hip:
        return True

    # Fix: query the properties of the device actually being asked about,
    # not whatever the current CUDA device happens to be (matters on
    # multi-GPU setups). Mirrors should_use_bf16 below.
    if device is None:
        device = torch.device("cuda")
    props = torch.cuda.get_device_properties(device)
    if props.major >= 8:
        # Ampere and newer: fp16 is fast and well supported.
        return True

    if props.major < 6:
        # Pre-Pascal: no usable fp16.
        return False

    fp16_works = False
    #FP16 is confirmed working on a 1080 (GP104) but it's a bit slower than FP32 so it should only be enabled
    #when the model doesn't actually fit on the card
    #TODO: actually test if GP106 and others have the same type of behavior
    nvidia_10_series = ["1080", "1070", "titan x", "p3000", "p3200", "p4000", "p4200", "p5000", "p5200", "p6000", "1060", "1050", "p40", "p100", "p6", "p4"]
    for x in nvidia_10_series:
        if x in props.name.lower():
            fp16_works = True

    if fp16_works or manual_cast:
        free_model_memory = (get_free_memory() * 0.9 - minimum_inference_memory())
        # Use fp16 when asked to ignore speed, or when the fp32 model
        # (4 bytes/param) would not fit in the remaining memory.
        if (not prioritize_performance) or model_params * 4 > free_model_memory:
            return True

    if props.major < 7:
        return False

    #FP16 is just broken on these cards
    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600", "MX550", "MX450", "CMP 30HX", "T2000", "T1000", "T1200"]
    for x in nvidia_16_series:
        if x in props.name:
            return False

    return True

802
803
804
805
806
807
808
809
810
def should_use_bf16(device=None, model_params=0, prioritize_performance=True, manual_cast=False):
    """Decide whether bf16 should be used for a model on *device*.

    Same contract as should_use_fp16: model_params is the parameter count,
    prioritize_performance=False accepts slower-but-working bf16, and
    manual_cast=True also accepts bf16 storage with compute-time casting.
    """
    if device is not None:
        if is_device_cpu(device): #TODO ? bf16 works on CPU but is extremely slow
            return False

    if device is not None: #TODO not sure about mps bf16 support
        if is_device_mps(device):
            return False

    if FORCE_FP32:
        return False

    if directml_enabled:
        return False

    if cpu_mode() or mps_mode():
        return False

    if is_intel_xpu():
        return True

    if device is None:
        device = torch.device("cuda")

    props = torch.cuda.get_device_properties(device)
    # Ampere (sm_80) and newer have native bf16.
    if props.major >= 8:
        return True

    bf16_works = torch.cuda.is_bf16_supported()

    if bf16_works or manual_cast:
        free_model_memory = (get_free_memory() * 0.9 - minimum_inference_memory())
        # Use bf16 when asked to ignore speed, or when the fp32 model
        # (4 bytes/param) would not fit in the remaining memory.
        if (not prioritize_performance) or model_params * 4 > free_model_memory:
            return True

    return False

839
def soft_empty_cache(force=False):
    """Release the active backend's allocator cache.

    On CUDA this is only done for Nvidia unless force=True (see comment
    below about ROCm).
    """
    global cpu_state
    if cpu_state == CPUState.MPS:
        torch.mps.empty_cache()
    elif is_intel_xpu():
        torch.xpu.empty_cache()
    elif torch.cuda.is_available():
        if force or is_nvidia(): #This seems to make things worse on ROCm so I only do it for cuda
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()

850
851
852
853
def unload_all_models():
    """Unload everything from the main compute device (1e30 byte request frees all)."""
    free_memory(1e30, get_torch_device())


854
def resolve_lowvram_weight(weight, model, key): #TODO: remove
    """Deprecated no-op kept for backward compatibility: returns *weight* unchanged."""
    return weight

857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
#TODO: might be cleaner to put this somewhere else
import threading

class InterruptProcessingException(Exception):
    """Raised to abort the current job when an interrupt has been requested."""
    pass

# Guards interrupt_processing; reentrant so nested helpers can re-acquire it.
interrupt_processing_mutex = threading.RLock()

interrupt_processing = False
def interrupt_current_processing(value=True):
    """Request (or clear, with value=False) an interrupt of the current processing."""
    global interrupt_processing
    global interrupt_processing_mutex
    with interrupt_processing_mutex:
        interrupt_processing = value

def processing_interrupted():
    """Return True when an interrupt has been requested and not yet consumed."""
    global interrupt_processing
    global interrupt_processing_mutex
    with interrupt_processing_mutex:
        return interrupt_processing

def throw_exception_if_processing_interrupted():
    """Consume a pending interrupt request by raising InterruptProcessingException."""
    global interrupt_processing
    global interrupt_processing_mutex
    with interrupt_processing_mutex:
        if interrupt_processing:
            # Reset the flag so the interrupt fires only once.
            interrupt_processing = False
            raise InterruptProcessingException()