model_management.py 9.31 KB
Newer Older
1
2
3
import psutil
from enum import Enum
from cli_args import args
4

5
6
7
8
9
10
11
class VRAMState(Enum):
    """Memory-management strategy for model loading/unloading."""
    CPU = 0          # run everything on the CPU, no GPU involved
    NO_VRAM = 1      # GPU present but essentially unusable; offload aggressively
    LOW_VRAM = 2     # split models between GPU and CPU via accelerate
    NORMAL_VRAM = 3  # default: one model on the GPU at a time
    HIGH_VRAM = 4    # plenty of VRAM: keep models on the GPU, never unload
    MPS = 5          # Apple Metal Performance Shaders backend
12

13
14
15
# Determine VRAM State
# vram_state is the active strategy; set_vram_to is a *requested* low/no-vram
# strategy that only becomes active later if accelerate imports successfully.
vram_state = VRAMState.NORMAL_VRAM
set_vram_to = VRAMState.NORMAL_VRAM

total_vram = 0                 # total GPU memory in MB (0 if undetectable)
total_vram_available_mb = -1   # per-model VRAM budget for low-vram mode; set below

accelerate_enabled = False
xpu_available = False          # True when an Intel XPU device is usable

# Best-effort hardware probe: any failure (torch missing, no GPU, driver
# issues) leaves the defaults above untouched.
try:
    import torch
    try:
        # Prefer Intel XPU when intel_extension_for_pytorch is installed and
        # reports a device; otherwise fall through to CUDA below.
        import intel_extension_for_pytorch as ipex
        if torch.xpu.is_available():
            xpu_available = True
            total_vram = torch.xpu.get_device_properties(torch.xpu.current_device()).total_memory / (1024 * 1024)
    except:
        # mem_get_info() -> (free, total); index [1] is total bytes
        total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
    total_ram = psutil.virtual_memory().total / (1024 * 1024)
    # Only auto-pick a mode when the user hasn't forced one on the CLI.
    if not args.normalvram and not args.cpu:
        if total_vram <= 4096:
            print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
            set_vram_to = VRAMState.LOW_VRAM
        # 1.1 factor: GPU must clearly exceed system RAM; 14336MB floor avoids
        # flagging small-RAM machines as "high vram"
        elif total_vram > total_ram * 1.1 and total_vram > 14336:
            print("Enabling highvram mode because your GPU has more vram than your computer has ram. If you don't want this use: --normalvram")
            vram_state = VRAMState.HIGH_VRAM
except:
    pass

43
44
45
46
47
# torch.cuda.OutOfMemoryError only exists on reasonably recent torch builds;
# fall back to the generic Exception so OOM-handling code elsewhere still
# works.  Catch only the failures this lookup can actually produce (torch not
# imported -> NameError, attribute absent on old torch -> AttributeError)
# instead of a bare except that would also swallow KeyboardInterrupt/SystemExit.
try:
    OOM_EXCEPTION = torch.cuda.OutOfMemoryError
except (NameError, AttributeError):
    OOM_EXCEPTION = Exception

48
49
# Probe for xformers memory-efficient attention unless the user disabled it.
if args.disable_xformers:
    XFORMERS_IS_AVAILABLE = False
else:
    try:
        import xformers
        import xformers.ops
        XFORMERS_IS_AVAILABLE = True
    except Exception:
        # Best effort: any import failure (package missing, broken binary
        # build) just means we run without xformers.  "except Exception"
        # rather than a bare except so Ctrl-C during import still propagates.
        XFORMERS_IS_AVAILABLE = False
57

58
59
# --use-pytorch-cross-attention: use torch's built-in scaled-dot-product
# attention instead of xformers.
ENABLE_PYTORCH_ATTENTION = args.use_pytorch_cross_attention
if ENABLE_PYTORCH_ATTENTION:
    # enable every SDP backend so torch can pick the best kernel at runtime
    torch.backends.cuda.enable_math_sdp(True)
    torch.backends.cuda.enable_flash_sdp(True)
    torch.backends.cuda.enable_mem_efficient_sdp(True)
    # the two attention implementations are mutually exclusive
    XFORMERS_IS_AVAILABLE = False
64

65
66
67
68
69
70
# Explicit CLI overrides for the autodetected VRAM strategy.
# NOTE(review): --lowvram/--novram only set set_vram_to (finalized further
# down, after accelerate imports), while --highvram flips vram_state directly.
if args.lowvram:
    set_vram_to = VRAMState.LOW_VRAM
elif args.novram:
    set_vram_to = VRAMState.NO_VRAM
elif args.highvram:
    vram_state = VRAMState.HIGH_VRAM
71

72
73
74
75
76
# --force-fp32 disables fp16 inference even where the hardware supports it.
FORCE_FP32 = bool(args.force_fp32)
if FORCE_FP32:
    print("Forcing FP32, if this improves things please report it.")

77

78
# Low/no-vram modes require accelerate to split a model between GPU and CPU;
# only commit the requested state once the import succeeds.
if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
    try:
        import accelerate
        accelerate_enabled = True
        vram_state = set_vram_to
    except Exception as e:
        import traceback
        print(traceback.format_exc())
        print("ERROR: COULD NOT ENABLE LOW VRAM MODE.")

    # Budget roughly half of VRAM (after 1GB headroom) for the model, with a
    # 256MB floor; consumed by accelerate's device map in load_model_gpu.
    total_vram_available_mb = (total_vram - 1024) // 2
    total_vram_available_mb = int(max(256, total_vram_available_mb))
90

91
92
# Detect Apple Metal (MPS).  torch.backends.mps only exists on newer torch
# builds, and torch itself may have failed to import above (NameError); catch
# Exception rather than a bare except so KeyboardInterrupt/SystemExit are not
# silently swallowed.
try:
    if torch.backends.mps.is_available():
        vram_state = VRAMState.MPS
except Exception:
    pass

97
98
if args.cpu:
    vram_state = VRAMState.CPU
99

100
print(f"Set vram state to: {vram_state.name}")
101

102
103

# The single model currently patched and resident on the compute device
# (None when nothing is loaded).
current_loaded_model = None
# Controlnet models currently resident on the GPU.
current_gpu_controlnets = []

# True while current_loaded_model is dispatched through accelerate hooks
# (low/no-vram modes); used to tear the hooks down on unload.
model_accelerated = False


109
110
def unload_model():
    """Unpatch the current model and move it (and controlnets) off the GPU."""
    global current_loaded_model
    global model_accelerated
    global current_gpu_controlnets
    global vram_state

    # never unload models from GPU on high vram
    keep_on_gpu = (vram_state == VRAMState.HIGH_VRAM)

    model = current_loaded_model
    if model is not None:
        if model_accelerated:
            # tear down accelerate's dispatch hooks before touching devices
            accelerate.hooks.remove_hook_from_submodules(model.model)
            model_accelerated = False

        if not keep_on_gpu:
            model.model.cpu()
        model.unpatch_model()
        current_loaded_model = None

    if not keep_on_gpu:
        if len(current_gpu_controlnets) > 0:
            for net in current_gpu_controlnets:
                net.cpu()
            current_gpu_controlnets = []
131
132
133
134


def load_model_gpu(model):
    """Patch `model` and place it on the device dictated by vram_state.

    Unloads any previously loaded model first.  Returns the (now current)
    model object, or early-returns None if it is already loaded.
    """
    global current_loaded_model
    global vram_state
    global model_accelerated

    # already resident: nothing to do
    if model is current_loaded_model:
        return
    unload_model()
    try:
        real_model = model.patch_model()
    except Exception as e:
        # roll back a partial patch before propagating
        model.unpatch_model()
        raise e
    current_loaded_model = model
    if vram_state == VRAMState.CPU:
        pass
    elif vram_state == VRAMState.MPS:
        mps_device = torch.device("mps")
        real_model.to(mps_device)
        pass
    elif vram_state == VRAMState.NORMAL_VRAM or vram_state == VRAMState.HIGH_VRAM:
        model_accelerated = False
        real_model.to(get_torch_device())
    else:
        # LOW_VRAM / NO_VRAM: let accelerate split the model between GPU and
        # CPU.  NO_VRAM pins the GPU share to a token 256MiB; LOW_VRAM uses
        # the budget computed at module load (total_vram_available_mb).
        if vram_state == VRAMState.NO_VRAM:
            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"})
        elif vram_state == VRAMState.LOW_VRAM:
            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"})

        accelerate.dispatch_model(real_model, device_map=device_map, main_device=get_torch_device())
        model_accelerated = True
    return current_loaded_model
165

comfyanonymous's avatar
comfyanonymous committed
166
167
def load_controlnet_gpu(models):
    """Make `models` the set of controlnets resident on the compute device."""
    global current_gpu_controlnets
    global vram_state
    if vram_state == VRAMState.CPU:
        return

    if vram_state in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
        # don't load controlnets like this if low vram because they will be
        # loaded right before running and unloaded right after
        return

    # evict controlnets that are no longer wanted
    for old in current_gpu_controlnets:
        if old not in models:
            old.cpu()

    device = get_torch_device()
    current_gpu_controlnets = [m.to(device) for m in models]
comfyanonymous's avatar
comfyanonymous committed
184

185

186
187
def load_if_low_vram(model):
    """In low/no-vram mode move `model` onto the compute device; else no-op."""
    global vram_state
    if vram_state in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
        return model.to(get_torch_device())
    return model

def unload_if_low_vram(model):
    """Counterpart of load_if_low_vram: move `model` back to the CPU."""
    global vram_state
    if vram_state in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
        return model.cpu()
    return model

198
def get_torch_device():
    """Return the compute device implied by the global vram_state."""
    global xpu_available
    if vram_state == VRAMState.MPS:
        return torch.device("mps")
    if vram_state == VRAMState.CPU:
        return torch.device("cpu")
    if xpu_available:
        return torch.device("xpu")
    # NOTE(review): this returns an int CUDA index, not a torch.device —
    # callers appear to handle both (see get_free_memory's hasattr check).
    return torch.cuda.current_device()
209
210
211
212
213

def get_autocast_device(dev):
    """Map a device (torch.device or raw CUDA index) to an autocast type string."""
    # torch.device objects carry .type ("cuda", "cpu", "mps", ...); anything
    # without that attribute (e.g. a plain int index) is treated as CUDA.
    return getattr(dev, "type", "cuda")
214

215

216
def xformers_enabled():
    """Report whether xformers attention should be used (never on CPU)."""
    if vram_state != VRAMState.CPU:
        return XFORMERS_IS_AVAILABLE
    return False
220

221
222
223
224
225
226
227
228
229
230
231
232
233

def xformers_enabled_vae():
    """Whether xformers attention should be used in the VAE.

    Excludes xformers 0.0.18, which returns NaN for large inputs
    (1152x1920 res images and above).
    """
    if not xformers_enabled():
        return False
    try:
        #0.0.18 has a bug where Nan is returned when inputs are too big (1152x1920 res images and above)
        if xformers.version.__version__ == "0.0.18":
            return False
    except Exception:
        # version lookup is best-effort; narrowed from a bare except so
        # KeyboardInterrupt/SystemExit still propagate
        pass
    return True

234
235
236
def pytorch_attention_enabled():
    """Return True when torch's built-in cross attention was requested via CLI."""
    return ENABLE_PYTORCH_ATTENTION

237
def get_free_memory(dev=None, torch_free_too=False):
    """Return free memory in bytes on `dev` (default: the current device).

    With torch_free_too=True returns a tuple (total_free, free_inside_torch),
    where the second value is memory torch has reserved but not allocated.
    """
    global xpu_available
    if dev is None:
        dev = get_torch_device()

    # CPU/MPS share system RAM; dev may also be a raw int CUDA index, which
    # has no .type attribute and falls through to the GPU branch.
    if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
        mem_free_total = psutil.virtual_memory().available
        mem_free_torch = mem_free_total
    else:
        if xpu_available:
            mem_free_total = torch.xpu.get_device_properties(dev).total_memory - torch.xpu.memory_allocated(dev)
            mem_free_torch = mem_free_total
        else:
            stats = torch.cuda.memory_stats(dev)
            mem_active = stats['active_bytes.all.current']
            mem_reserved = stats['reserved_bytes.all.current']
            # mem_get_info gives driver-level free memory, excluding torch's cache
            mem_free_cuda, _ = torch.cuda.mem_get_info(dev)
            # reserved-but-inactive memory is reusable within torch's allocator
            mem_free_torch = mem_reserved - mem_active
            mem_free_total = mem_free_cuda + mem_free_torch

    if torch_free_too:
        return (mem_free_total, mem_free_torch)
    else:
        return mem_free_total
261
262
263

def maximum_batch_area():
    """Estimate the maximum batch area (in latent pixels) fitting in free memory."""
    global vram_state
    if vram_state == VRAMState.NO_VRAM:
        return 0

    free_mb = get_free_memory() / (1024 * 1024)
    # keep 1GB headroom, use 90% of the rest, scaled by an empirical 0.6 factor
    area = ((free_mb - 1024) * 0.9) / (0.6)
    return int(max(area, 0))
270
271
272

def cpu_mode():
    """True when everything runs on the CPU."""
    return vram_state == VRAMState.CPU
274

Yurii Mazurevich's avatar
Yurii Mazurevich committed
275
276
def mps_mode():
    """True when running on Apple Metal (MPS)."""
    return vram_state == VRAMState.MPS
Yurii Mazurevich's avatar
Yurii Mazurevich committed
278

279
def should_use_fp16():
    """Decide whether models should run in fp16 on the current hardware.

    False when fp32 is forced, on CPU/MPS/XPU, on CUDA GPUs with compute
    capability < 7, and on the GTX 16xx / T500-series cards listed below.
    """
    global xpu_available
    if FORCE_FP32:
        # user explicitly requested fp32 via --force-fp32
        return False

    if cpu_mode() or mps_mode() or xpu_available:
        return False #TODO ?

    # NOTE(review): bf16 support appears to be used as a proxy for "recent
    # GPU where fp16 is safe" — confirm this is the intended check.
    if torch.cuda.is_bf16_supported():
        return True

    props = torch.cuda.get_device_properties("cuda")
    if props.major < 7:
        return False

    #FP32 is faster on those cards?
    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600"]
    for x in nvidia_16_series:
        if x in props.name:
            return False

    return True

302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
#TODO: might be cleaner to put this somewhere else
import threading


class InterruptProcessingException(Exception):
    """Raised to abort the work currently being processed."""
    pass


# Re-entrant lock guarding the interrupt flag below.
interrupt_processing_mutex = threading.RLock()

interrupt_processing = False


def interrupt_current_processing(value=True):
    """Request (value=True) or clear (value=False) an interrupt."""
    global interrupt_processing
    with interrupt_processing_mutex:
        interrupt_processing = value


def processing_interrupted():
    """Return True while an interrupt request is pending."""
    with interrupt_processing_mutex:
        return interrupt_processing


def throw_exception_if_processing_interrupted():
    """Consume a pending interrupt request by raising InterruptProcessingException."""
    global interrupt_processing
    with interrupt_processing_mutex:
        if interrupt_processing:
            interrupt_processing = False
            raise InterruptProcessingException()