model_management.py 26.2 KB
Newer Older
1
2
import psutil
from enum import Enum
comfyanonymous's avatar
comfyanonymous committed
3
from comfy.cli_args import args
comfyanonymous's avatar
comfyanonymous committed
4
import comfy.utils
5
import torch
comfyanonymous's avatar
comfyanonymous committed
6
import sys
7

8
class VRAMState(Enum):
    # How aggressively model weights have to be shuffled between RAM and VRAM.
    DISABLED = 0    #No vram present: no need to move models to vram
    NO_VRAM = 1     #Very low vram: enable all the options to save vram
    LOW_VRAM = 2    # Load models partially, keeping the rest offloaded
    NORMAL_VRAM = 3 # Default: load/unload models on demand
    HIGH_VRAM = 4   # Keep models resident on the GPU
    SHARED = 5      #No dedicated vram: memory shared between CPU and GPU but models still need to be moved between both.
15
16
17
18
19

class CPUState(Enum):
    # Backend family of the main compute device.
    GPU = 0  # default; also used for XPU/DirectML paths (see setup below)
    CPU = 1  # forced with --cpu
    MPS = 2  # Apple Metal Performance Shaders
20

21
22
23
# Determine VRAM State
# Module-level device/memory state; refined further down as devices are probed.
vram_state = VRAMState.NORMAL_VRAM
set_vram_to = VRAMState.NORMAL_VRAM
cpu_state = CPUState.GPU

total_vram = 0

lowvram_available = True
xpu_available = False

if args.deterministic:
    # Trades speed for reproducibility; warn_only avoids crashing on ops
    # that have no deterministic implementation.
    print("Using deterministic algorithms for pytorch")
    torch.use_deterministic_algorithms(True, warn_only=True)

35
# DirectML (Windows GPU) support; only active when --directml is passed.
directml_enabled = False
if args.directml is not None:
    import torch_directml
    directml_enabled = True
    device_index = args.directml
    if device_index < 0:
        # Negative index means "let torch_directml pick the default adapter".
        directml_device = torch_directml.device()
    else:
        directml_device = torch_directml.device(device_index)
    print("Using directml with device:", torch_directml.device_name(device_index))
    # torch_directml.disable_tiled_resources(True)
    lowvram_available = False #TODO: need to find a way to get free memory in directml before this can be enabled by default.
47

48
# Probe for Intel XPU support; missing ipex simply leaves xpu_available False.
try:
    import intel_extension_for_pytorch as ipex
    if torch.xpu.is_available():
        xpu_available = True
except:
    pass

55
56
57
# Probe for Apple MPS; older torch builds lack torch.backends.mps entirely.
try:
    if torch.backends.mps.is_available():
        cpu_state = CPUState.MPS
        import torch.mps
except:
    pass

# --cpu overrides any detected accelerator.
if args.cpu:
    cpu_state = CPUState.CPU

65
66
def is_intel_xpu():
    """Return True when the compute backend is an Intel XPU."""
    # XPU runs through the generic GPU state with xpu_available flagged at import.
    return cpu_state == CPUState.GPU and xpu_available

def get_torch_device():
    """Return the torch.device inference should run on, honoring the
    backend detected at import time (DirectML > MPS > CPU > XPU > CUDA)."""
    if directml_enabled:
        return directml_device
    if cpu_state == CPUState.MPS:
        return torch.device("mps")
    if cpu_state == CPUState.CPU:
        return torch.device("cpu")
    # GPU state: either Intel XPU or a CUDA/ROCm device.
    if is_intel_xpu():
        return torch.device("xpu")
    return torch.device(torch.cuda.current_device())

def get_total_memory(dev=None, torch_total_too=False):
    """Total memory in bytes of *dev* (default: the inference device).

    Returns (total, total_reserved_by_torch) when torch_total_too is True,
    otherwise just the total.
    """
    if dev is None:
        dev = get_torch_device()

    if hasattr(dev, 'type') and dev.type in ('cpu', 'mps'):
        # CPU and MPS share system RAM.
        mem_total = psutil.virtual_memory().total
        mem_total_torch = mem_total
    elif directml_enabled:
        mem_total = 1024 * 1024 * 1024 #TODO
        mem_total_torch = mem_total
    elif is_intel_xpu():
        stats = torch.xpu.memory_stats(dev)
        mem_total_torch = stats['reserved_bytes.all.current']
        mem_total = torch.xpu.get_device_properties(dev).total_memory
    else:
        stats = torch.cuda.memory_stats(dev)
        mem_total_torch = stats['reserved_bytes.all.current']
        _, mem_total = torch.cuda.mem_get_info(dev)

    if torch_total_too:
        return (mem_total, mem_total_torch)
    return mem_total

# Report totals in MB and auto-enable lowvram on small cards.
total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
total_ram = psutil.virtual_memory().total / (1024 * 1024)
print("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
if not args.normalvram and not args.cpu:
    if lowvram_available and total_vram <= 4096:
        print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
        set_vram_to = VRAMState.LOW_VRAM

126
127
128
129
130
# torch.cuda.OutOfMemoryError only exists on newer torch; fall back to the
# base Exception so `except OOM_EXCEPTION` is always valid.
try:
    OOM_EXCEPTION = torch.cuda.OutOfMemoryError
except:
    OOM_EXCEPTION = Exception

131
132
# Probe xformers availability/version; any import failure disables it.
XFORMERS_VERSION = ""
XFORMERS_ENABLED_VAE = True
if args.disable_xformers:
    XFORMERS_IS_AVAILABLE = False
else:
    try:
        import xformers
        import xformers.ops
        XFORMERS_IS_AVAILABLE = True
        try:
            # A python-only install without the C++ kernels is not usable.
            XFORMERS_IS_AVAILABLE = xformers._has_cpp_library
        except:
            pass
        try:
            XFORMERS_VERSION = xformers.version.__version__
            print("xformers version:", XFORMERS_VERSION)
            if XFORMERS_VERSION.startswith("0.0.18"):
                print()
                print("WARNING: This version of xformers has a major bug where you will get black images when generating high resolution images.")
                print("Please downgrade or upgrade xformers to a different version.")
                print()
                XFORMERS_ENABLED_VAE = False
        except:
            pass
    except:
        XFORMERS_IS_AVAILABLE = False
157

158
159
160
161
162
def is_nvidia():
    """Return True when the GPU backend is a CUDA (NVIDIA) build of torch."""
    # torch.version.cuda is a version string on CUDA builds, None otherwise.
    return cpu_state == CPUState.GPU and bool(torch.version.cuda)
164

165
166
167
168
169
# --use-pytorch-cross-attention forces SDP attention and disables xformers.
ENABLE_PYTORCH_ATTENTION = False
if args.use_pytorch_cross_attention:
    ENABLE_PYTORCH_ATTENTION = True
    XFORMERS_IS_AVAILABLE = False

170
# Pick the VAE dtype and default attention implementation.
VAE_DTYPE = torch.float32

try:
    if is_nvidia():
        torch_version = torch.version.__version__
        if int(torch_version[0]) >= 2:
            # torch >= 2 on NVIDIA: SDP attention by default unless another
            # attention backend was explicitly requested.
            if ENABLE_PYTORCH_ATTENTION == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
                ENABLE_PYTORCH_ATTENTION = True
            # Ampere (sm80) and newer decode the VAE in bf16 safely.
            if torch.cuda.is_bf16_supported() and torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8:
                VAE_DTYPE = torch.bfloat16
    if is_intel_xpu():
        if args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
            ENABLE_PYTORCH_ATTENTION = True
except:
    pass

if is_intel_xpu():
    VAE_DTYPE = torch.bfloat16

# On CPU the VAE must run in fp32.
if args.cpu_vae:
    VAE_DTYPE = torch.float32

# Explicit CLI overrides win over the auto-detected dtype.
if args.fp16_vae:
    VAE_DTYPE = torch.float16
elif args.bf16_vae:
    VAE_DTYPE = torch.bfloat16
elif args.fp32_vae:
    VAE_DTYPE = torch.float32


if ENABLE_PYTORCH_ATTENTION:
    # Allow all SDP backends; torch picks the best one per call.
    torch.backends.cuda.enable_math_sdp(True)
    torch.backends.cuda.enable_flash_sdp(True)
    torch.backends.cuda.enable_mem_efficient_sdp(True)
204

205
206
# CLI overrides for the VRAM strategy.
if args.lowvram:
    set_vram_to = VRAMState.LOW_VRAM
    lowvram_available = True
elif args.novram:
    set_vram_to = VRAMState.NO_VRAM
elif args.highvram or args.gpu_only:
    vram_state = VRAMState.HIGH_VRAM
212

213
FORCE_FP32 = False
214
FORCE_FP16 = False
215
216
217
218
if args.force_fp32:
    print("Forcing FP32, if this improves things please report it.")
    FORCE_FP32 = True

219
220
221
222
if args.force_fp16:
    print("Forcing FP16.")
    FORCE_FP16 = True

223
# Commit the requested low/no-vram mode only when lowvram is supported.
if lowvram_available:
    if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
        vram_state = set_vram_to


# Non-GPU backends have no VRAM to manage; MPS shares memory with the CPU.
if cpu_state != CPUState.GPU:
    vram_state = VRAMState.DISABLED

if cpu_state == CPUState.MPS:
    vram_state = VRAMState.SHARED
233

234
print(f"Set vram state to: {vram_state.name}")
235

236
237
238
239
DISABLE_SMART_MEMORY = args.disable_smart_memory

if DISABLE_SMART_MEMORY:
    print("Disabling smart memory management")
240

241
242
def get_torch_device_name(device):
    """Human-readable description of *device* for startup logging."""
    if hasattr(device, 'type'):
        if device.type != "cuda":
            return "{}".format(device.type)
        # get_allocator_backend() is missing on older torch builds.
        try:
            allocator_backend = torch.cuda.get_allocator_backend()
        except:
            allocator_backend = ""
        return "{} {} : {}".format(device, torch.cuda.get_device_name(device), allocator_backend)
    if is_intel_xpu():
        return "{} {}".format(device, torch.xpu.get_device_name(device))
    return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device))
255
256

# Best-effort report of the chosen device; failures here are non-fatal.
try:
    print("Device:", get_torch_device_name(get_torch_device()))
except:
    print("Could not pick default device.")

print("VAE dtype:", VAE_DTYPE)
262

comfyanonymous's avatar
comfyanonymous committed
263
# LoadedModel instances currently resident, most recently used first.
current_loaded_models = []
264

265
266
267
268
269
270
271
272
def module_size(module):
    """Return the total size in bytes of all tensors in *module*'s state dict."""
    return sum(t.nelement() * t.element_size() for t in module.state_dict().values())

comfyanonymous's avatar
comfyanonymous committed
273
274
275
276
277
class LoadedModel:
    """Tracks one ModelPatcher loaded (or partially loaded) onto a device.

    Wraps the patch/unpatch lifecycle and the lowvram partial-load logic.
    """
    def __init__(self, model):
        # model is a ModelPatcher-like object exposing load_device,
        # offload_device, model_size(), patch_model(), unpatch_model(), etc.
        self.model = model
        # True while modules were put into lowvram (comfy_cast_weights) mode.
        self.model_accelerated = False
        self.device = model.load_device

    def model_memory(self):
        # Full size of the model in bytes.
        return self.model.model_size()

    def model_memory_required(self, device):
        # Memory that loading onto *device* would newly consume; zero when the
        # model already lives there.
        if device == self.model.current_device:
            return 0
        else:
            return self.model_memory()

    def model_load(self, lowvram_model_memory=0):
        """Patch and move the model to its device.

        lowvram_model_memory > 0 switches to partial loading: only that many
        bytes of modules are moved to the device, the rest are cast on the fly.
        Returns the underlying real model; raises whatever patch_model raises
        (after rolling back).
        """
        patch_model_to = None
        if lowvram_model_memory == 0:
            # Full load: patch_model moves the whole model to the device.
            patch_model_to = self.device

        self.model.model_patches_to(self.device)
        self.model.model_patches_to(self.model.model_dtype())

        try:
            self.real_model = self.model.patch_model(device_to=patch_model_to) #TODO: do something with loras and offloading to CPU
        except Exception as e:
            # Roll back to the offload device before re-raising.
            self.model.unpatch_model(self.model.offload_device)
            self.model_unload()
            raise e

        if lowvram_model_memory > 0:
            print("loading in lowvram mode", lowvram_model_memory/(1024 * 1024))
            mem_counter = 0
            for m in self.real_model.modules():
                if hasattr(m, "comfy_cast_weights"):
                    # Enable on-the-fly casting and move the module to the
                    # device only while it still fits in the budget.
                    m.prev_comfy_cast_weights = m.comfy_cast_weights
                    m.comfy_cast_weights = True
                    module_mem = module_size(m)
                    if mem_counter + module_mem < lowvram_model_memory:
                        m.to(self.device)
                        mem_counter += module_mem
                elif hasattr(m, "weight"): #only modules with comfy_cast_weights can be set to lowvram mode
                    m.to(self.device)
                    mem_counter += module_size(m)
                    print("lowvram: loaded module regularly", m)

            self.model_accelerated = True

        if is_intel_xpu() and not args.disable_ipex_optimize:
            self.real_model = torch.xpu.optimize(self.real_model.eval(), inplace=True, auto_kernel_selection=True, graph_mode=True)

        return self.real_model

    def model_unload(self):
        """Undo model_load: restore cast flags and move weights back to the
        offload device."""
        if self.model_accelerated:
            for m in self.real_model.modules():
                if hasattr(m, "prev_comfy_cast_weights"):
                    m.comfy_cast_weights = m.prev_comfy_cast_weights
                    del m.prev_comfy_cast_weights

            self.model_accelerated = False

        self.model.unpatch_model(self.model.offload_device)
        self.model.model_patches_to(self.model.offload_device)

    def __eq__(self, other):
        # Two LoadedModel wrappers are equal when they wrap the same patcher.
        # NOTE(review): defining __eq__ makes the class unhashable; it is only
        # used in lists here, so that is fine.
        return self.model is other.model
comfyanonymous's avatar
comfyanonymous committed
340

comfyanonymous's avatar
comfyanonymous committed
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
def minimum_inference_memory():
    """Bytes that must stay free on the device for inference itself (1 GiB)."""
    return 1024 * 1024 * 1024

def unload_model_clones(model):
    """Unload every currently-loaded model that is a clone of *model*."""
    clone_indices = [i for i, lm in enumerate(current_loaded_models) if model.is_clone(lm.model)]
    # Pop from the back so earlier indices stay valid.
    for i in reversed(clone_indices):
        print("unload clone", i)
        current_loaded_models.pop(i).model_unload()

def free_memory(memory_required, device, keep_loaded=[]):
    """Evict least-recently-used models from *device* until at least
    *memory_required* bytes are free (or nothing evictable remains).

    Models in keep_loaded are never evicted. With smart memory disabled,
    everything evictable on the device is unloaded unconditionally.
    """
    unloaded_model = False
    # Walk from the end of the list = least recently used first.
    for i in range(len(current_loaded_models) -1, -1, -1):
        if not DISABLE_SMART_MEMORY:
            if get_free_memory(device) > memory_required:
                break
        shift_model = current_loaded_models[i]
        if shift_model.device == device:
            if shift_model not in keep_loaded:
                m = current_loaded_models.pop(i)
                m.model_unload()
                del m
                unloaded_model = True

    if unloaded_model:
        soft_empty_cache()
    else:
        # Nothing was evicted: still return cached allocator memory when torch
        # is hoarding a large share of it.
        if vram_state != VRAMState.HIGH_VRAM:
            mem_free_total, mem_free_torch = get_free_memory(device, torch_free_too=True)
            if mem_free_torch > mem_free_total * 0.25:
                soft_empty_cache()
comfyanonymous's avatar
comfyanonymous committed
375
376

def load_models_gpu(models, memory_required=0):
    """Ensure every patcher in *models* is loaded on its device.

    Frees memory first (evicting LRU models), then loads the missing models,
    falling back to partial (lowvram) loading when the device is too small.
    memory_required is extra working memory to reserve beyond the minimum.
    """
    global vram_state

    inference_memory = minimum_inference_memory()
    extra_mem = max(inference_memory, memory_required)

    models_to_load = []
    models_already_loaded = []
    for x in models:
        loaded_model = LoadedModel(x)

        if loaded_model in current_loaded_models:
            # Already loaded: move to the front (most recently used).
            index = current_loaded_models.index(loaded_model)
            current_loaded_models.insert(0, current_loaded_models.pop(index))
            models_already_loaded.append(loaded_model)
        else:
            if hasattr(x, "model"):
                print(f"Requested to load {x.model.__class__.__name__}")
            models_to_load.append(loaded_model)

    if len(models_to_load) == 0:
        # Nothing new to load; still make sure working memory is available on
        # every device the loaded models occupy.
        devs = set(map(lambda a: a.device, models_already_loaded))
        for d in devs:
            if d != torch.device("cpu"):
                free_memory(extra_mem, d, models_already_loaded)
        return

    print(f"Loading {len(models_to_load)} new model{'s' if len(models_to_load) > 1 else ''}")

    # Per-device memory the new models will need (clones are unloaded first
    # so their memory is counted as free).
    total_memory_required = {}
    for loaded_model in models_to_load:
        unload_model_clones(loaded_model.model)
        total_memory_required[loaded_model.device] = total_memory_required.get(loaded_model.device, 0) + loaded_model.model_memory_required(loaded_model.device)

    for device in total_memory_required:
        if device != torch.device("cpu"):
            # 1.3x safety margin over the raw weight size.
            free_memory(total_memory_required[device] * 1.3 + extra_mem, device, models_already_loaded)

    for loaded_model in models_to_load:
        model = loaded_model.model
        torch_dev = model.load_device
        if is_device_cpu(torch_dev):
            vram_set_state = VRAMState.DISABLED
        else:
            vram_set_state = vram_state
        lowvram_model_memory = 0
        if lowvram_available and (vram_set_state == VRAMState.LOW_VRAM or vram_set_state == VRAMState.NORMAL_VRAM):
            model_size = loaded_model.model_memory_required(torch_dev)
            current_free_mem = get_free_memory(torch_dev)
            # Budget for a partial load: what's free minus 1GB, with the same
            # 1.3x margin, but never less than 64MB.
            lowvram_model_memory = int(max(64 * (1024 * 1024), (current_free_mem - 1024 * (1024 * 1024)) / 1.3 ))
            if model_size > (current_free_mem - inference_memory): #only switch to lowvram if really necessary
                vram_set_state = VRAMState.LOW_VRAM
            else:
                lowvram_model_memory = 0

        if vram_set_state == VRAMState.NO_VRAM:
            # Minimal budget: almost everything stays offloaded.
            lowvram_model_memory = 64 * 1024 * 1024

        cur_loaded_model = loaded_model.model_load(lowvram_model_memory)
        current_loaded_models.insert(0, loaded_model)
    return


def load_model_gpu(model):
    # Convenience wrapper for loading a single model.
    return load_models_gpu([model])

def cleanup_models():
    """Unload models whose patcher is no longer referenced anywhere else.

    Refcount <= 2 means only this list (and the getrefcount argument) still
    hold the model.
    """
    stale = [i for i, lm in enumerate(current_loaded_models) if sys.getrefcount(lm.model) <= 2]
    # Pop back-to-front so indices remain valid.
    for i in reversed(stale):
        dropped = current_loaded_models.pop(i)
        dropped.model_unload()
        del dropped
452

453
454
455
456
def dtype_size(dtype):
    """Return the size in bytes of a single element of torch dtype *dtype*."""
    if dtype == torch.float16 or dtype == torch.bfloat16:
        return 2
    if dtype == torch.float32:
        return 4
    # Fix: catch only AttributeError instead of a bare except, so real errors
    # are not silently swallowed; also drop the local that shadowed the
    # function name.
    try:
        return dtype.itemsize
    except AttributeError: #Old pytorch doesn't have .itemsize
        return 4

466
def unet_offload_device():
    """Device the UNet is parked on when not in use."""
    # In high vram mode the UNet never leaves the GPU.
    return get_torch_device() if vram_state == VRAMState.HIGH_VRAM else torch.device("cpu")

comfyanonymous's avatar
comfyanonymous committed
472
473
474
475
476
477
def unet_inital_load_device(parameters, dtype):
    """Pick the device the UNet weights are first materialized on.

    parameters: number of weights; dtype: their storage dtype.
    """
    main_dev = get_torch_device()
    if vram_state == VRAMState.HIGH_VRAM:
        return main_dev

    cpu_dev = torch.device("cpu")
    if DISABLE_SMART_MEMORY:
        return cpu_dev

    # Load directly on the GPU only when it both has more free memory than
    # the CPU and can actually hold the weights.
    weight_bytes = dtype_size(dtype) * parameters
    free_on_dev = get_free_memory(main_dev)
    free_on_cpu = get_free_memory(cpu_dev)
    if free_on_dev > free_on_cpu and weight_bytes < free_on_dev:
        return main_dev
    return cpu_dev

comfyanonymous's avatar
comfyanonymous committed
490
def unet_dtype(device=None, model_params=0, supported_dtypes=[torch.float16, torch.bfloat16, torch.float32]):
    """Storage dtype for the UNet: CLI overrides first, then the best
    supported reduced precision, falling back to fp32."""
    if args.bf16_unet:
        return torch.bfloat16
    if args.fp16_unet:
        return torch.float16
    if args.fp8_e4m3fn_unet:
        return torch.float8_e4m3fn
    if args.fp8_e5m2_unet:
        return torch.float8_e5m2

    if torch.float16 in supported_dtypes and should_use_fp16(device=device, model_params=model_params, manual_cast=True):
        return torch.float16
    if torch.bfloat16 in supported_dtypes and should_use_bf16(device, model_params=model_params, manual_cast=True):
        return torch.bfloat16
    return torch.float32

507
# None means no manual cast
comfyanonymous's avatar
comfyanonymous committed
508
# None means no manual cast
def unet_manual_cast(weight_dtype, inference_device, supported_dtypes=[torch.float16, torch.bfloat16, torch.float32]):
    """Return the compute dtype to manually cast *weight_dtype* weights to,
    or None when the device can run that dtype natively."""
    if weight_dtype == torch.float32:
        return None

    fp16_supported = should_use_fp16(inference_device, prioritize_performance=False)
    if fp16_supported and weight_dtype == torch.float16:
        return None

    bf16_supported = should_use_bf16(inference_device)
    if bf16_supported and weight_dtype == torch.bfloat16:
        return None

    # Weights need casting: pick the best supported compute dtype.
    if fp16_supported and torch.float16 in supported_dtypes:
        return torch.float16
    if bf16_supported and torch.bfloat16 in supported_dtypes:
        return torch.bfloat16
    return torch.float32

528
def text_encoder_offload_device():
    """Device the text encoder is parked on when idle."""
    return get_torch_device() if args.gpu_only else torch.device("cpu")

534
def text_encoder_device():
    """Device to run the text encoder on."""
    if args.gpu_only:
        return get_torch_device()
    if vram_state in (VRAMState.HIGH_VRAM, VRAMState.NORMAL_VRAM):
        # XPU runs text encoding on CPU; otherwise use the GPU only when
        # fp16 is viable there.
        if is_intel_xpu():
            return torch.device("cpu")
        if should_use_fp16(prioritize_performance=False):
            return get_torch_device()
    return torch.device("cpu")

547
548
549
550
551
552
553
554
555
556
def text_encoder_dtype(device=None):
    """Storage dtype for the text encoder: CLI overrides, else fp16.

    device is accepted for interface compatibility; the previous CPU
    special-case returned the same fp16 default, so that dead branch was
    removed.
    """
    if args.fp8_e4m3fn_text_enc:
        return torch.float8_e4m3fn
    elif args.fp8_e5m2_text_enc:
        return torch.float8_e5m2
    elif args.fp16_text_enc:
        return torch.float16
    elif args.fp32_text_enc:
        return torch.float32

    return torch.float16

562

563
564
565
566
567
568
def intermediate_device():
    """Device intermediate results (e.g. latents between stages) live on."""
    return get_torch_device() if args.gpu_only else torch.device("cpu")

569
def vae_device():
    """Device to run the VAE on (--cpu-vae forces CPU)."""
    return torch.device("cpu") if args.cpu_vae else get_torch_device()

def vae_offload_device():
    """Device the VAE is parked on when idle."""
    return get_torch_device() if args.gpu_only else torch.device("cpu")

580
def vae_dtype():
    # Dtype chosen for the VAE at import time (see VAE_DTYPE setup above).
    global VAE_DTYPE
    return VAE_DTYPE
583

584
585
586
587
def get_autocast_device(dev):
    """Device-type string for torch.autocast; defaults to "cuda" when *dev*
    has no .type attribute."""
    return getattr(dev, 'type', "cuda")
588

589
590
591
def supports_dtype(device, dtype): #TODO
    """Whether *device* can compute in *dtype* (fp32 always; half types on
    non-CPU devices only)."""
    if dtype == torch.float32:
        return True
    if is_device_cpu(device):
        return False
    return dtype in (torch.float16, torch.bfloat16)

600
601
602
603
604
def device_supports_non_blocking(device):
    """Whether non_blocking transfers are safe on *device*."""
    #pytorch bug? mps doesn't support non blocking
    return not is_device_mps(device)

605
606
607
608
609
610
611
def cast_to_device(tensor, device, dtype, copy=False):
    """Move *tensor* to *device* and cast it to *dtype*.

    When the target device can cast the source dtype itself, move first and
    cast on-device; otherwise do a combined to(device, dtype).
    """
    if tensor.dtype == torch.float32 or tensor.dtype == torch.float16:
        can_cast_on_device = True
    elif tensor.dtype == torch.bfloat16:
        # bf16 casting on-device only works on CUDA and Intel XPU.
        can_cast_on_device = (hasattr(device, 'type') and device.type.startswith("cuda")) or is_intel_xpu()
    else:
        can_cast_on_device = False

    non_blocking = device_supports_non_blocking(device)

    if not can_cast_on_device:
        return tensor.to(device, dtype, copy=copy, non_blocking=non_blocking)
    if copy:
        if tensor.device == device:
            return tensor.to(dtype, copy=copy, non_blocking=non_blocking)
        return tensor.to(device, copy=copy, non_blocking=non_blocking).to(dtype, non_blocking=non_blocking)
    return tensor.to(device, non_blocking=non_blocking).to(dtype, non_blocking=non_blocking)
626

627
def xformers_enabled():
    """Whether xformers attention should be used on the current backend."""
    if cpu_state != CPUState.GPU or is_intel_xpu() or directml_enabled:
        return False
    return XFORMERS_IS_AVAILABLE
637

638
639
640
641
642

def xformers_enabled_vae():
    """xformers for the VAE: requires xformers to be enabled and not the
    buggy 0.0.18 release (which produces black high-res images)."""
    return xformers_enabled() and XFORMERS_ENABLED_VAE
645

646
def pytorch_attention_enabled():
    # Whether torch SDP attention was selected at import time.
    global ENABLE_PYTORCH_ATTENTION
    return ENABLE_PYTORCH_ATTENTION

650
651
652
653
def pytorch_attention_flash_attention():
    """Whether torch SDP attention can use flash attention kernels."""
    #TODO: more reliable way of checking for flash attention?
    #pytorch flash attention only works on Nvidia
    return ENABLE_PYTORCH_ATTENTION and is_nvidia()

658
def get_free_memory(dev=None, torch_free_too=False):
    """Free memory in bytes on *dev* (default: the inference device).

    Returns (free_total, free_inside_torch_cache) when torch_free_too is
    True, otherwise just the total. free_total includes memory torch has
    reserved but not allocated.
    """
    global directml_enabled
    if dev is None:
        dev = get_torch_device()

    if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
        # CPU and MPS share system RAM.
        mem_free_total = psutil.virtual_memory().available
        mem_free_torch = mem_free_total
    else:
        if directml_enabled:
            mem_free_total = 1024 * 1024 * 1024 #TODO
            mem_free_torch = mem_free_total
        elif is_intel_xpu():
            stats = torch.xpu.memory_stats(dev)
            mem_active = stats['active_bytes.all.current']
            mem_allocated = stats['allocated_bytes.all.current']
            mem_reserved = stats['reserved_bytes.all.current']
            # Reserved-but-inactive memory is reusable by torch.
            mem_free_torch = mem_reserved - mem_active
            mem_free_total = torch.xpu.get_device_properties(dev).total_memory - mem_allocated
        else:
            stats = torch.cuda.memory_stats(dev)
            mem_active = stats['active_bytes.all.current']
            mem_reserved = stats['reserved_bytes.all.current']
            mem_free_cuda, _ = torch.cuda.mem_get_info(dev)
            mem_free_torch = mem_reserved - mem_active
            mem_free_total = mem_free_cuda + mem_free_torch

    if torch_free_too:
        return (mem_free_total, mem_free_torch)
    else:
        return mem_free_total
689

690
def cpu_mode():
    # True when inference is forced onto the CPU.
    global cpu_state
    return cpu_state == CPUState.CPU
693

Yurii Mazurevich's avatar
Yurii Mazurevich committed
694
def mps_mode():
    # True when inference runs on Apple MPS.
    global cpu_state
    return cpu_state == CPUState.MPS
Yurii Mazurevich's avatar
Yurii Mazurevich committed
697

698
def is_device_type(device, type):
    """True when *device* is a torch.device-like object of the given type."""
    return hasattr(device, 'type') and device.type == type

704
705
706
def is_device_cpu(device):
    # True for torch.device("cpu")-like objects.
    return is_device_type(device, 'cpu')

comfyanonymous's avatar
comfyanonymous committed
707
def is_device_mps(device):
    # True for torch.device("mps")-like objects.
    return is_device_type(device, 'mps')

def is_device_cuda(device):
    # True for torch.device("cuda")-like objects.
    return is_device_type(device, 'cuda')
712

713
def should_use_fp16(device=None, model_params=0, prioritize_performance=True, manual_cast=False):
    """Decide whether fp16 should be used on *device*.

    device: target device; None means the default cuda device.
    model_params: weight count, used to check whether fp32 would even fit.
    prioritize_performance: when True, slow-fp16 cards (10 series) only use
        fp16 if the fp32 model does not fit in free memory.
    manual_cast: treat fp16 as acceptable even where native fp16 is slow.
    """
    global directml_enabled

    if device is not None:
        if is_device_cpu(device):
            return False

    if FORCE_FP16:
        return True

    if device is not None:
        if is_device_mps(device):
            return True

    if FORCE_FP32:
        return False

    if directml_enabled:
        return False

    if mps_mode():
        return True

    if cpu_mode():
        return False

    if is_intel_xpu():
        return True

    if torch.version.hip:
        return True

    # Fix: query the properties of the device actually being asked about
    # instead of always the current "cuda" device, which gave wrong answers
    # on mixed multi-GPU systems.
    if device is None:
        device = torch.device("cuda")
    props = torch.cuda.get_device_properties(device)
    if props.major >= 8:
        return True

    if props.major < 6:
        return False

    fp16_works = False
    #FP16 is confirmed working on a 1080 (GP104) but it's a bit slower than FP32 so it should only be enabled
    #when the model doesn't actually fit on the card
    #TODO: actually test if GP106 and others have the same type of behavior
    nvidia_10_series = ["1080", "1070", "titan x", "p3000", "p3200", "p4000", "p4200", "p5000", "p5200", "p6000", "1060", "1050", "p40", "p100", "p6", "p4"]
    for x in nvidia_10_series:
        if x in props.name.lower():
            fp16_works = True

    if fp16_works or manual_cast:
        free_model_memory = (get_free_memory() * 0.9 - minimum_inference_memory())
        if (not prioritize_performance) or model_params * 4 > free_model_memory:
            return True

    if props.major < 7:
        return False

    #FP16 is just broken on these cards
    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600", "MX550", "MX450", "CMP 30HX", "T2000", "T1000", "T1200"]
    for x in nvidia_16_series:
        if x in props.name:
            return False

    return True

777
778
779
780
781
782
783
784
785
def should_use_bf16(device=None, model_params=0, prioritize_performance=True, manual_cast=False):
    """Decide whether bf16 should be used on *device* (mirror of
    should_use_fp16)."""
    if device is not None:
        if is_device_cpu(device): #TODO ? bf16 works on CPU but is extremely slow
            return False
        if is_device_mps(device): #TODO not sure about mps bf16 support
            return False

    if FORCE_FP32 or directml_enabled:
        return False

    if cpu_mode() or mps_mode():
        return False

    if is_intel_xpu():
        return True

    props = torch.cuda.get_device_properties(device if device is not None else torch.device("cuda"))
    if props.major >= 8:
        # Ampere and newer support bf16 natively.
        return True

    bf16_works = torch.cuda.is_bf16_supported()
    if bf16_works or manual_cast:
        headroom = (get_free_memory() * 0.9 - minimum_inference_memory())
        if (not prioritize_performance) or model_params * 4 > headroom:
            return True

    return False

814
def soft_empty_cache(force=False):
    """Return cached allocator memory to the system on the active backend."""
    if cpu_state == CPUState.MPS:
        torch.mps.empty_cache()
    elif is_intel_xpu():
        torch.xpu.empty_cache()
    elif torch.cuda.is_available():
        #This seems to make things worse on ROCm so I only do it for cuda
        if force or is_nvidia():
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()

825
826
827
828
def unload_all_models():
    # 1e30 bytes is effectively infinite, forcing every model off the device.
    free_memory(1e30, get_torch_device())


829
def resolve_lowvram_weight(weight, model, key): #TODO: remove
    # Legacy hook kept for compatibility; now a no-op pass-through.
    return weight

832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
#TODO: might be cleaner to put this somewhere else
import threading

class InterruptProcessingException(Exception):
    """Raised by throw_exception_if_processing_interrupted() to abort a job."""
    pass

# Guards interrupt_processing; the flag may be flipped from another thread.
interrupt_processing_mutex = threading.RLock()

interrupt_processing = False

def interrupt_current_processing(value=True):
    """Request (or with value=False, cancel) an interrupt of the running job."""
    global interrupt_processing
    with interrupt_processing_mutex:
        interrupt_processing = value

def processing_interrupted():
    """Return the interrupt flag without consuming it."""
    with interrupt_processing_mutex:
        return interrupt_processing

def throw_exception_if_processing_interrupted():
    """Consume a pending interrupt request by raising
    InterruptProcessingException; no-op when none is pending."""
    global interrupt_processing
    with interrupt_processing_mutex:
        if interrupt_processing:
            interrupt_processing = False
            raise InterruptProcessingException()