model_management.py 8.8 KB
Newer Older
1

2
3
4
5
# Vram/device states as plain ints, roughly ordered from least to most GPU
# residency; MPS and XPU select the Apple-Metal and Intel backends.
CPU = 0
NO_VRAM = 1
LOW_VRAM = 2
NORMAL_VRAM = 3
HIGH_VRAM = 4
MPS = 5
XPU = 6

# True once accelerate-based offloading has been enabled further down.
accelerate_enabled = False
vram_state = NORMAL_VRAM

# Total VRAM of the active device in MiB (0 until the probe below runs).
total_vram = 0
# MiB budget handed to accelerate in low/no-vram modes; -1 = not computed.
total_vram_available_mb = -1

16
import sys
17
import psutil
18

Francesco Yoshi Gobbo's avatar
Francesco Yoshi Gobbo committed
19
20
# Check --cpu up front so GPU probing can be skipped/overridden later.
forced_cpu = "--cpu" in sys.argv

set_vram_to = NORMAL_VRAM

# Best-effort hardware probe: pick a vram mode from total VRAM vs. system RAM.
# The blanket try/except means a torch build without CUDA (or no torch at all)
# simply leaves the defaults in place instead of crashing at import time.
try:
    import torch
    # mem_get_info returns (free, total) in bytes; take total, convert to MiB.
    total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
    total_ram = psutil.virtual_memory().total / (1024 * 1024)
    forced_normal_vram = "--normalvram" in sys.argv
    if not forced_normal_vram and not forced_cpu:
        # <=4GiB cards default to lowvram (validated against accelerate below).
        if total_vram <= 4096:
            print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
            set_vram_to = LOW_VRAM
        # More VRAM than RAM (with 10% slack) on a >=14GiB card: keep models resident.
        elif total_vram > total_ram * 1.1 and total_vram > 14336:
            print("Enabling highvram mode because your GPU has more vram than your computer has ram. If you don't want this use: --normalvram")
            vram_state = HIGH_VRAM
except:
    pass

38
39
40
41
42
# torch.cuda.OutOfMemoryError only exists on CUDA-enabled torch builds; fall
# back to the Exception base class otherwise (the bare except also covers the
# case where torch itself failed to import above).
try:
    OOM_EXCEPTION = torch.cuda.OutOfMemoryError
except:
    OOM_EXCEPTION = Exception

43
44
# xformers cross-attention availability. --disable-xformers always wins;
# otherwise probe the import, which can fail for many reasons (not installed,
# CUDA/ABI mismatch, broken wheel). Fix: catch Exception instead of a bare
# except so KeyboardInterrupt/SystemExit are not silently swallowed here.
# (The misspelled name XFORMERS_IS_AVAILBLE is public API — kept as-is.)
if "--disable-xformers" in sys.argv:
    XFORMERS_IS_AVAILBLE = False
else:
    try:
        import xformers
        import xformers.ops
        XFORMERS_IS_AVAILBLE = True
    except Exception:
        XFORMERS_IS_AVAILBLE = False

53
54
55
56
57
58
59
60
ENABLE_PYTORCH_ATTENTION = False
if "--use-pytorch-cross-attention" in sys.argv:
    # Enable every scaled_dot_product_attention backend and let torch choose.
    # Pytorch attention replaces xformers, so the latter is forced off.
    torch.backends.cuda.enable_math_sdp(True)
    torch.backends.cuda.enable_flash_sdp(True)
    torch.backends.cuda.enable_mem_efficient_sdp(True)
    ENABLE_PYTORCH_ATTENTION = True
    XFORMERS_IS_AVAILBLE = False

61

62
63
64
65
if "--lowvram" in sys.argv:
    set_vram_to = LOW_VRAM
if "--novram" in sys.argv:
    set_vram_to = NO_VRAM
66
67
if "--highvram" in sys.argv:
    vram_state = HIGH_VRAM
68

69

70
if set_vram_to == LOW_VRAM or set_vram_to == NO_VRAM:
    # Low/no-vram modes require accelerate for device_map offloading; only
    # commit vram_state once the import has actually succeeded.
    try:
        import accelerate
        accelerate_enabled = True
        vram_state = set_vram_to
    except Exception as e:
        import traceback
        print(traceback.format_exc())
        print("ERROR: COULD NOT ENABLE LOW VRAM MODE.")

    # Hand accelerate half of (VRAM - 1GiB), never less than 256 MiB.
    # NOTE(review): this is computed even when the import above failed.
    total_vram_available_mb = (total_vram - 1024) // 2
    total_vram_available_mb = int(max(256, total_vram_available_mb))
82

83
84
85
86
87
88
# Apple Silicon: prefer the Metal (MPS) backend when torch reports it.
try:
    if torch.backends.mps.is_available():
        vram_state = MPS
except:
    pass

藍+85CD's avatar
藍+85CD committed
89
# Intel GPUs: importing intel_extension_for_pytorch registers torch.xpu.
try:
    import intel_extension_for_pytorch as ipex
    if torch.xpu.is_available():
        vram_state = XPU
except:
    pass

Francesco Yoshi Gobbo's avatar
Francesco Yoshi Gobbo committed
96
# --cpu wins over everything detected above.
if forced_cpu:
    vram_state = CPU

# List index order must match the CPU..XPU constants defined at the top.
print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM", "MPS", "XPU"][vram_state])
100

101
102

# The single currently loaded (patched) model wrapper, or None.
current_loaded_model = None
# Controlnet models currently resident on the GPU.
current_gpu_controlnets = []

# True while current_loaded_model is dispatched through accelerate hooks.
model_accelerated = False


108
109
def unload_model():
    """Move the current model (and GPU controlnets) off the GPU and unpatch it.

    In HIGH_VRAM mode weights deliberately stay on the GPU; only the model
    patches are undone and the bookkeeping globals reset.
    """
    global current_loaded_model
    global model_accelerated
    global current_gpu_controlnets
    global vram_state

    if current_loaded_model is not None:
        # Remove accelerate dispatch hooks before touching the weights.
        if model_accelerated:
            accelerate.hooks.remove_hook_from_submodules(current_loaded_model.model)
            model_accelerated = False

        #never unload models from GPU on high vram
        if vram_state != HIGH_VRAM:
            current_loaded_model.model.cpu()
        current_loaded_model.unpatch_model()
        current_loaded_model = None

    # Controlnets follow the same high-vram exemption.
    if vram_state != HIGH_VRAM:
        if len(current_gpu_controlnets) > 0:
            for n in current_gpu_controlnets:
                n.cpu()
            current_gpu_controlnets = []
130
131
132
133


def load_model_gpu(model):
    """Patch `model` and place it on the device implied by vram_state.

    No-op (implicit None return) when `model` is already loaded; otherwise
    unloads the previous model first and returns the newly loaded wrapper.
    Raises whatever `patch_model()` raised, after unpatching.
    """
    global current_loaded_model
    global vram_state
    global model_accelerated

    if model is current_loaded_model:
        return
    unload_model()
    try:
        real_model = model.patch_model()
    except Exception as e:
        # Don't leave the model half-patched if patching blew up.
        model.unpatch_model()
        raise e
    current_loaded_model = model
    if vram_state == CPU:
        pass
    elif vram_state == MPS:
        mps_device = torch.device("mps")
        real_model.to(mps_device)
        pass
    elif vram_state == XPU:
        real_model.to("xpu")
        pass
    elif vram_state == NORMAL_VRAM or vram_state == HIGH_VRAM:
        model_accelerated = False
        real_model.cuda()
    else:
        # LOW_VRAM / NO_VRAM: let accelerate split the model between GPU 0 and
        # CPU RAM; NO_VRAM keeps only a 256MiB sliver on the GPU.
        if vram_state == NO_VRAM:
            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"})
        elif vram_state == LOW_VRAM:
            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"})

        accelerate.dispatch_model(real_model, device_map=device_map, main_device="cuda")
        model_accelerated = True
    return current_loaded_model
167

comfyanonymous's avatar
comfyanonymous committed
168
169
def load_controlnet_gpu(models):
    """Make exactly `models` the set of GPU-resident controlnets.

    Does nothing in CPU mode and in low/no-vram modes (see inline comment).
    """
    global current_gpu_controlnets
    global vram_state
    if vram_state == CPU:
        return

    if vram_state == LOW_VRAM or vram_state == NO_VRAM:
        #don't load controlnets like this if low vram because they will be loaded right before running and unloaded right after
        return

    # Evict previously loaded controlnets that are no longer wanted.
    for m in current_gpu_controlnets:
        if m not in models:
            m.cpu()

    device = get_torch_device()
    current_gpu_controlnets = []
    for m in models:
        current_gpu_controlnets.append(m.to(device))
comfyanonymous's avatar
comfyanonymous committed
186

187

188
189
190
191
192
193
194
195
196
197
198
199
def load_if_low_vram(model):
    """Move `model` onto CUDA in the low/no-vram modes; pass through otherwise."""
    global vram_state
    if vram_state in (LOW_VRAM, NO_VRAM):
        return model.cuda()
    return model

def unload_if_low_vram(model):
    """Move `model` back to the CPU in the low/no-vram modes; pass through otherwise."""
    global vram_state
    if vram_state in (LOW_VRAM, NO_VRAM):
        return model.cpu()
    return model

200
def get_torch_device():
    """Return the torch device matching vram_state.

    MPS/XPU/CPU map to fixed device names; every other state (the CUDA
    modes) returns torch.cuda.current_device(), i.e. a bare device index.
    """
    fixed_devices = {MPS: "mps", XPU: "xpu", CPU: "cpu"}
    name = fixed_devices.get(vram_state)
    if name is not None:
        return torch.device(name)
    return torch.cuda.current_device()

def get_autocast_device(dev):
    """Return the device-type string for torch.autocast, defaulting to "cuda"
    when `dev` has no `.type` attribute (e.g. a bare CUDA device index)."""
    return getattr(dev, "type", "cuda")
214

215

216
217
218
219
220
def xformers_enabled():
    """Whether xformers attention should be used (never in CPU mode)."""
    return False if vram_state == CPU else XFORMERS_IS_AVAILBLE

221
222
223
224
225
226
227
228
229
230
231
232
233

def xformers_enabled_vae():
    """Whether xformers should be used for the VAE; excludes the 0.0.18 release."""
    if not xformers_enabled():
        return False
    try:
        #0.0.18 has a bug where Nan is returned when inputs are too big (1152x1920 res images and above)
        if xformers.version.__version__ == "0.0.18":
            return False
    except:
        pass
    return True

234
235
236
def pytorch_attention_enabled():
    """True when --use-pytorch-cross-attention enabled the torch SDP backends."""
    return ENABLE_PYTORCH_ATTENTION

237
238
def get_free_memory(dev=None, torch_free_too=False):
    """Return free memory in bytes on `dev` (default: current torch device).

    With torch_free_too=True, returns a (total_free, free_inside_torch_cache)
    tuple instead of a single number.
    """
    if dev is None:
        dev = get_torch_device()

    if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
        # CPU and MPS share system RAM; torch keeps no separate pool there.
        mem_free_total = psutil.virtual_memory().available
        mem_free_torch = mem_free_total
    elif hasattr(dev, 'type') and (dev.type == 'xpu'):
        mem_free_total = torch.xpu.get_device_properties(dev).total_memory - torch.xpu.memory_allocated(dev)
        mem_free_torch = mem_free_total
    else:
        # CUDA: driver-reported free memory plus what torch has reserved in
        # its caching allocator but is not actively using.
        stats = torch.cuda.memory_stats(dev)
        mem_active = stats['active_bytes.all.current']
        mem_reserved = stats['reserved_bytes.all.current']
        mem_free_cuda, _ = torch.cuda.mem_get_info(dev)
        mem_free_torch = mem_reserved - mem_active
        mem_free_total = mem_free_cuda + mem_free_torch

    if torch_free_too:
        return (mem_free_total, mem_free_torch)
    else:
        return mem_free_total
259
260
261
262
263
264
265
266
267

def maximum_batch_area():
    """Largest pixel area worth attempting given the memory currently free."""
    global vram_state
    if vram_state == NO_VRAM:
        return 0

    free_mb = get_free_memory() / (1024 * 1024)
    # Reserve 1GiB, keep a 10% safety margin, then scale by the 0.6 fudge factor.
    usable = ((free_mb - 1024) * 0.9) / (0.6)
    return int(max(usable, 0))
268
269
270
271
272

def cpu_mode():
    """True when the global vram_state is CPU."""
    global vram_state
    return vram_state == CPU

Yurii Mazurevich's avatar
Yurii Mazurevich committed
273
274
275
276
def mps_mode():
    """True when the global vram_state is MPS (Apple Metal backend)."""
    global vram_state
    return vram_state == MPS

藍+85CD's avatar
藍+85CD committed
277
278
279
280
def xpu_mode():
    """True when the global vram_state is XPU (Intel GPU backend)."""
    global vram_state
    return vram_state == XPU

281
def should_use_fp16():
    """Decide whether fp16 weights are a good idea on the current device."""
    if cpu_mode() or mps_mode() or xpu_mode():
        return False #TODO ?

    if torch.cuda.is_bf16_supported():
        return True

    props = torch.cuda.get_device_properties("cuda")
    if props.major < 7:
        return False

    #FP32 is faster on those cards?
    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600"]
    return not any(series in props.name for series in nvidia_16_series)

300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
#TODO: might be cleaner to put this somewhere else
import threading


class InterruptProcessingException(Exception):
    """Raised to abort the currently running processing job."""
    pass


# Guards the interrupt_processing flag across worker/UI threads.
interrupt_processing_mutex = threading.RLock()

interrupt_processing = False


def interrupt_current_processing(value=True):
    """Set (or clear, with value=False) the pending-interrupt flag."""
    global interrupt_processing, interrupt_processing_mutex
    with interrupt_processing_mutex:
        interrupt_processing = value


def processing_interrupted():
    """Return the pending-interrupt flag without consuming it."""
    global interrupt_processing, interrupt_processing_mutex
    with interrupt_processing_mutex:
        return interrupt_processing


def throw_exception_if_processing_interrupted():
    """Consume a pending interrupt: clear the flag and raise if it was set."""
    global interrupt_processing, interrupt_processing_mutex
    with interrupt_processing_mutex:
        if interrupt_processing:
            interrupt_processing = False
            raise InterruptProcessingException()